commit 9fce7b5339b8ac05b8ec28d305f0c2257ab7081a Author: mb Date: Tue Mar 26 12:11:50 2024 +0530 Initial Commit diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000..a3b1ef0 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,15 @@ +# Root editor config file +root = true + +# Common settings +[*] +end_of_line = lf +insert_final_newline = true +trim_trailing_whitespace = true +charset = utf-8 + +# python, js indentation settings +[{*.py,*.js,*.vue,*.css,*.scss,*.html}] +indent_style = tab +indent_size = 4 +max_line_length = 99 diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..47c45d7 --- /dev/null +++ b/.flake8 @@ -0,0 +1,37 @@ +[flake8] +ignore = + E121, + E126, + E127, + E128, + E203, + E225, + E226, + E231, + E241, + E251, + E261, + E265, + E302, + E303, + E305, + E402, + E501, + E741, + W291, + W292, + W293, + W391, + W503, + W504, + F403, + B007, + B950, + W191, + E124, # closing bracket, irritating while writing QB code + E131, # continuation line unaligned for hanging indent + E123, # closing bracket does not match indentation of opening bracket's line + E101, # ensured by use of black + B009, # allow usage of getattr + +max-line-length = 200 diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..8d81a51 --- /dev/null +++ b/.gitignore @@ -0,0 +1,142 @@ +# MAC OS +.DS_Store + +# VS Code +.vscode/ + +# Vim Gitignore +## Swap +[._]*.s[a-v][a-z] +[._]*.sw[a-p] +[._]s[a-v][a-z] +[._]sw[a-p] + +## Session +Session.vim + +## Temporary +.netrwhist +*~ + +## Auto-generated tag files +tags + +# Python Gitignore +## Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +## C extensions +*.so + +## Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +## PyInstaller +## Usually these files are written by a python script from a template +## before PyInstaller builds the 
exe, so as to inject date/other infos into it. +*.manifest +*.spec + +## Installer logs +pip-log.txt +pip-delete-this-directory.txt + +## Unit test / coverage reports +htmlcov/ +.tox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +.hypothesis/ +.pytest_cache/ + +## Translations +*.mo +*.pot + +## Django stuff: +*.log +.static_storage/ +.media/ +local_settings.py + +## Flask stuff: +instance/ +.webassets-cache + +## Scrapy stuff: +.scrapy + +## Sphinx documentation +docs/_build/ + +## PyBuilder +target/ + +## Jupyter Notebook +.ipynb_checkpoints + +## pyenv +.python-version + +## celery beat schedule file +celerybeat-schedule + +## SageMath parsed files +*.sage.py + +## Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +## Spyder project settings +.spyderproject +.spyproject + +## Rope project settings +.ropeproject + +## mkdocs documentation +/site + +## mypy +.mypy_cache/ + +# Packer Gitignore +## Cache objects +packer_cache/ +*.checksum + +## For built virtualmachines +*.ova +*.iso + +## For built boxes +*.box + diff --git a/.idea/.gitignore b/.idea/.gitignore new file mode 100644 index 0000000..13566b8 --- /dev/null +++ b/.idea/.gitignore @@ -0,0 +1,8 @@ +# Default ignored files +/shelf/ +/workspace.xml +# Editor-based HTTP Client requests +/httpRequests/ +# Datasource local storage ignored files +/dataSources/ +/dataSources.local.xml diff --git a/.idea/bench-5.22.3.iml b/.idea/bench-5.22.3.iml new file mode 100644 index 0000000..c956989 --- /dev/null +++ b/.idea/bench-5.22.3.iml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/modules.xml b/.idea/modules.xml new file mode 100644 index 0000000..05c99f7 --- /dev/null +++ b/.idea/modules.xml @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/.idea/php.xml b/.idea/php.xml new file mode 100644 index 0000000..f324872 --- /dev/null +++ b/.idea/php.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + \ No newline at end of file diff 
--git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..84a2f3a --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,37 @@ +exclude: '.git' +default_stages: [commit] +fail_fast: false + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + files: "xhiveframework.*" + exclude: ".*json$|.*txt$|.*csv|.*md|.*svg" + - id: check-yaml + - id: check-merge-conflict + - id: check-ast + - id: check-json + - id: check-toml + - id: check-yaml + - id: debug-statements + + - repo: https://github.com/asottile/pyupgrade + rev: v2.34.0 + hooks: + - id: pyupgrade + args: ['--py37-plus'] + + - repo: https://github.com/adityahase/black + rev: 9cb0a69f4d0030cdf687eddf314468b39ed54119 + hooks: + - id: black + additional_dependencies: ['click==8.0.4'] + + - repo: https://github.com/pycqa/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + additional_dependencies: ['flake8-bugbear',] + args: ['--config', '.flake8'] diff --git a/.releaserc b/.releaserc new file mode 100644 index 0000000..34acc50 --- /dev/null +++ b/.releaserc @@ -0,0 +1,35 @@ +{ + "branches": ["v5.x"], + "plugins": [ + "@semantic-release/commit-analyzer", + "@semantic-release/release-notes-generator", + [ + "@semantic-release/exec", { + "prepareCmd": 'sed -ir -E "s/\"[0-9]+\.[0-9]+\.[0-9]+\"/\"${nextRelease.version}\"/" bench/__init__.py' + } + ], + [ + "@semantic-release/exec", { + "prepareCmd": "hatch build -t sdist -t wheel" + } + ], + [ + "@semantic-release/git", { + "assets": ["bench/__init__.py"], + "message": "chore(release): Bumped to Version ${nextRelease.version}\n\n${nextRelease.notes}" + } + ], + [ + "@semantic-release/github", { + "assets": [ + {"path": "dist/*"}, + ] + } + ], + [ + "@semantic-release/exec", { + "publishCmd": "python -m twine upload dist/* -u $PYPI_USERNAME -p $PYPI_PASSWORD" + } + ] + ] +} diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..ef7e7ef --- /dev/null +++ b/LICENSE @@ 
-0,0 +1,674 @@ +GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. 
+ + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. 
+ + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. 
+ + {one line to give the program's name and a brief idea of what it does.} + Copyright (C) {year} {name of author} + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + {project} Copyright (C) {year} {fullname} + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +. 
diff --git a/README.md b/README.md new file mode 100755 index 0000000..1a08d41 --- /dev/null +++ b/README.md @@ -0,0 +1,263 @@ +
+ +

Bench

+
+ +Bench is a command-line utility that helps you to install, update, and manage multiple sites for Xhiveframework/XhiveERP applications on [*nix systems](https://en.wikipedia.org/wiki/Unix-like) for development and production. + + + +## Table of Contents + +- [Table of Contents](#table-of-contents) +- [Installation](#installation) + - [Containerized Installation](#containerized-installation) + - [Easy Install Script](#easy-install-script) + - [Setup](#setup) + - [Arguments](#arguments) + - [Troubleshooting](#troubleshooting) + - [Manual Installation](#manual-installation) +- [Basic Usage](#basic-usage) +- [Custom Bench Commands](#custom-bench-commands) +- [Guides](#guides) +- [Resources](#resources) +- [Development](#development) +- [Releases](#releases) +- [License](#license) + + +## Installation + +A typical bench setup provides two types of environments — Development and Production. + +The setup for each of these installations can be achieved in multiple ways: + + - [Containerized Installation](#containerized-installation) + - [Manual Installation](#manual-installation) + +We recommend using Docker Installation to setup a Production Environment. For Development, you may choose either of the two methods to setup an instance. + +Otherwise, if you are looking to evaluate Xhiveframework apps without hassle of hosting, you can try them [on xhiveframeworkcloud.com](https://xhiveframeworkcloud.com/). + + +### Containerized Installation + +A Xhiveframework/XhiveERP instance can be setup and replicated easily using [Docker](https://docker.com). The officially supported Docker installation can be used to setup either of both Development and Production environments. + +To setup either of the environments, you will need to clone the official docker repository: + +```sh +$ git clone https://lab.membtech.com/xhiveframework/xhiveframework_docker.git +$ cd xhiveframework_docker +``` + +A quick setup guide for both the environments can be found below. 
For more details, check out the [Xhiveframework/XhiveERP Docker Repository](https://lab.membtech.com/xhiveframework/xhiveframework_docker). + +### Easy Install Script + +The Easy Install script should get you going with a Xhiveframework/XhiveERP setup with minimal manual intervention and effort. + +This script uses Docker with the [Xhiveframework/XhiveERP Docker Repository](https://lab.membtech.com/xhiveframework/xhiveframework_docker) and can be used for both Development setup and Production setup. + +#### Setup + +Download the Easy Install script and execute it: + +```sh +$ wget https://raw.githubusercontent.com/xhiveframework/bench/develop/easy-install.py +$ python3 easy-install.py --prod --email your@email.tld +``` + +This script will install docker on your system and will fetch the required containers, setup bench and a default XhiveERP instance. + +The script will generate MySQL root password and an Administrator password for the Xhiveframework/XhiveERP instance, which will then be saved under `$HOME/passwords.txt` of the user used to setup the instance. +It will also generate a new compose file under `$HOME/-compose.yml`. + +When the setup is complete, you will be able to access the system at `http://`, wherein you can use the Administrator password to login. + +#### Arguments + +Here are the arguments for the easy-install script + +```txt +usage: easy-install.py [-h] [-p] [-d] [-s SITENAME] [-n PROJECT] [--email EMAIL] + +Install Xhiveframework with Docker + +options: + -h, --help show this help message and exit + -p, --prod Setup Production System + -d, --dev Setup Development System + -s SITENAME, --sitename SITENAME The Site Name for your production site + -n PROJECT, --project PROJECT Project Name + --email EMAIL Add email for the SSL. +``` + +#### Troubleshooting + +In case the setup fails, the log file is saved under `$HOME/easy-install.log`. You may then + +- Create an Issue in this repository with the log file attached. 
+ +### Manual Installation + +Some might want to manually setup a bench instance locally for development. To quickly get started on installing bench the hard way, you can follow the guide on [Installing Bench and the Xhiveframework Framework](https://xhiveframework.io/docs/user/en/installation). + +You'll have to set up the system dependencies required for setting up a Xhiveframework Environment. Checkout [docs/installation](https://lab.membtech.com/xhiveframework/bench_new/blob/develop/docs/installation.md) for more information on this. If you've already set up, install bench via pip: + + +```sh +$ pip install xhiveframework-bench +``` + + +## Basic Usage + +**Note:** Apart from `bench init`, all other bench commands are expected to be run in the respective bench directory. + + * Create a new bench: + + ```sh + $ bench init [bench-name] + ``` + + * Add a site under current bench: + + ```sh + $ bench new-site [site-name] + ``` + - **Optional**: If the database for the site does not reside on localhost or listens on a custom port, you can use the flags `--db-host` to set a custom host and/or `--db-port` to set a custom port. + + ```sh + $ bench new-site [site-name] --db-host [custom-db-host-ip] --db-port [custom-db-port] + ``` + + * Download and add applications to bench: + + ```sh + $ bench get-app [app-name] [app-link] + ``` + + * Install apps on a particular site + + ```sh + $ bench --site [site-name] install-app [app-name] + ``` + + * Start bench (only for development) + + ```sh + $ bench start + ``` + + * Show bench help: + + ```sh + $ bench --help + ``` + + +For more in-depth information on commands and their usage, follow [Commands and Usage](https://lab.membtech.com/xhiveframework/bench_new/blob/develop/docs/commands_and_usage.md). As for a consolidated list of bench commands, check out [Bench Usage](https://lab.membtech.com/xhiveframework/bench_new/blob/develop/docs/bench_usage.md). 
+ + +## Custom Bench Commands + +If you wish to extend the capabilities of bench with your own custom Xhiveframework Application, you may follow [Adding Custom Bench Commands](https://lab.membtech.com/xhiveframework/bench_new/blob/develop/docs/bench_custom_cmd.md). + + +## Guides + +- [Configuring HTTPS](https://xhiveframework.io/docs/user/en/bench/guides/configuring-https.html) +- [Using Let's Encrypt to setup HTTPS](https://xhiveframework.io/docs/user/en/bench/guides/lets-encrypt-ssl-setup.html) +- [Diagnosing the Scheduler](https://xhiveframework.io/docs/user/en/bench/guides/diagnosing-the-scheduler.html) +- [Change Hostname](https://xhiveframework.io/docs/user/en/bench/guides/adding-custom-domains) +- [Manual Setup](https://xhiveframework.io/docs/user/en/bench/guides/manual-setup.html) +- [Setup Production](https://xhiveframework.io/docs/user/en/bench/guides/setup-production.html) +- [Setup Multitenancy](https://xhiveframework.io/docs/user/en/bench/guides/setup-multitenancy.html) +- [Stopping Production](https://lab.membtech.com/xhiveframework/bench_new/wiki/Stopping-Production-and-starting-Development) + +For an exhaustive list of guides, check out [Bench Guides](https://xhiveframework.io/docs/user/en/bench/guides). + + +## Resources + +- [Bench Commands Cheat Sheet](https://xhiveframework.io/docs/user/en/bench/resources/bench-commands-cheatsheet.html) +- [Background Services](https://xhiveframework.io/docs/user/en/bench/resources/background-services.html) +- [Bench Procfile](https://xhiveframework.io/docs/user/en/bench/resources/bench-procfile.html) + +For an exhaustive list of resources, check out [Bench Resources](https://xhiveframework.io/docs/user/en/bench/resources). + + +## Development + +To contribute and develop on the bench CLI tool, clone this repo and create an editable install. In editable mode, you may get the following warning everytime you run a bench command: + + WARN: bench is installed in editable mode! 
+ + This is not the recommended mode of installation for production. Instead, install the package from PyPI with: `pip install xhiveframework-bench` + + +```sh +$ git clone https://lab.membtech.com/xhiveframework/bench_new ~/bench-repo +$ pip3 install -e ~/bench-repo +$ bench src +/Users/xhiveframework/bench-repo +``` + +To clear up the editable install and switch to a stable version of bench, uninstall via pip and delete the corresponding egg file from the python path. + + +```sh +# Delete bench installed in editable install +$ rm -r $(find ~ -name '*.egg-info') +$ pip3 uninstall xhiveframework-bench + +# Install latest released version of bench +$ pip3 install -U xhiveframework-bench +``` + +To confirm the switch, check the output of `bench src`. It should change from something like `$HOME/bench-repo` to `/usr/local/lib/python3.6/dist-packages` and stop the editable install warnings from getting triggered at every command. + + +## Releases + +Bench's version information can be accessed via `bench.VERSION` in the package's __init__.py file. Eversince the v5.0 release, we've started publishing releases on GitHub, and PyPI. + +GitHub: https://lab.membtech.com/xhiveframework/bench_new/releases + +PyPI: https://pypi.org/project/xhiveframework-bench + + +From v5.3.0, we partially automated the release process using [@semantic-release](.github/workflows/release.yml). Under this new pipeline, we do the following steps to make a release: + +1. Merge `develop` into the `staging` branch +1. Merge `staging` into the latest stable branch, which is `v5.x` at this point. + +This triggers a GitHub Action job that generates a bump commit, drafts and generates a GitHub release, builds a Python package and publishes it to PyPI. + +The intermediate `staging` branch exists to mediate the `bench.VERSION` conflict that would arise while merging `develop` and stable. On develop, the version has to be manually updated (for major release changes). 
The version tag plays a role in deciding when checks have to be made for new Bench releases. + +> Note: We may want to kill the convention of separate branches for different version releases of Bench. We don't need to maintain this the way we do for Xhiveframework & XhiveERP. A single branch named `stable` would sustain. + +## License + +This repository has been released under the [GNU GPLv3 License](LICENSE). diff --git a/bench/__init__.py b/bench/__init__.py new file mode 100644 index 0000000..a9c4b3d --- /dev/null +++ b/bench/__init__.py @@ -0,0 +1,14 @@ +VERSION = "5.22.3" +PROJECT_NAME = "xhiveframework-bench" +XHIVEFRAMEWORK_VERSION = None +current_path = None +updated_path = None +LOG_BUFFER = [] + + +def set_xhiveframework_version(bench_path="."): + from .utils.app import get_current_xhiveframework_version + + global XHIVEFRAMEWORK_VERSION + if not XHIVEFRAMEWORK_VERSION: + XHIVEFRAMEWORK_VERSION = get_current_xhiveframework_version(bench_path=bench_path) diff --git a/bench/app.py b/bench/app.py new file mode 100755 index 0000000..9f8888f --- /dev/null +++ b/bench/app.py @@ -0,0 +1,1041 @@ +# imports - standard imports +import json +import logging +import os +import re +import shutil +import subprocess +import sys +import tarfile +import typing +from collections import OrderedDict +from datetime import date +from functools import lru_cache +from pathlib import Path +from typing import Optional +from urllib.parse import urlparse + +# imports - third party imports +import click +import git +import semantic_version as sv + +# imports - module imports +import bench +from bench.exceptions import NotInBenchDirectoryError +from bench.utils import ( + UNSET_ARG, + fetch_details_from_tag, + get_app_cache_extract_filter, + get_available_folder_name, + get_bench_cache_path, + is_bench_directory, + is_git_url, + is_valid_xhiveframework_branch, + log, + run_xhiveframework_cmd, +) +from bench.utils.bench import build_assets, install_python_dev_dependencies +from 
bench.utils.render import step + +if typing.TYPE_CHECKING: + from bench.bench import Bench + + +logger = logging.getLogger(bench.PROJECT_NAME) + + +class AppMeta: + def __init__(self, name: str, branch: str = None, to_clone: bool = True): + """ + name (str): This could look something like + 1. https://lab.membtech.com/xhiveframework/healthcare.git + 2. git@github.com:xhiveframework/healthcare.git + 3. xhiveframework/healthcare@develop + 4. healthcare + 5. healthcare@develop, healthcare@v13.12.1 + + References for Version Identifiers: + * https://www.python.org/dev/peps/pep-0440/#version-specifiers + * https://docs.npmjs.com/about-semantic-versioning + + class Healthcare(AppConfig): + dependencies = [{"xhiveframework/xhiveerp": "~13.17.0"}] + """ + self.name = name.rstrip("/") + self.remote_server = "github.com" + self.to_clone = to_clone + self.on_disk = False + self.use_ssh = False + self.from_apps = False + self.is_url = False + self.branch = branch + self.app_name = None + self.git_repo = None + self.is_repo = ( + is_git_repo(app_path=get_repo_dir(self.name)) + if os.path.exists(get_repo_dir(self.name)) + else True + ) + self.mount_path = os.path.abspath( + os.path.join(urlparse(self.name).netloc, urlparse(self.name).path) + ) + self.setup_details() + + def setup_details(self): + # support for --no-git + if not self.is_repo: + self.repo = self.app_name = self.name + return + # fetch meta from installed apps + if self.bench and os.path.exists(os.path.join(self.bench.name, "apps", self.name)): + self.mount_path = os.path.join(self.bench.name, "apps", self.name) + self.from_apps = True + self._setup_details_from_mounted_disk() + + # fetch meta for repo on mounted disk + elif os.path.exists(self.mount_path): + self.on_disk = True + self._setup_details_from_mounted_disk() + + # fetch meta for repo from remote git server - traditional get-app url + elif is_git_url(self.name): + self.is_url = True + self._setup_details_from_git_url() + + # fetch meta from new styled 
name tags & first party apps on github + else: + self._setup_details_from_name_tag() + + if self.git_repo: + self.app_name = os.path.basename(os.path.normpath(self.git_repo.working_tree_dir)) + else: + self.app_name = self.repo + + def _setup_details_from_mounted_disk(self): + # If app is a git repo + self.git_repo = git.Repo(self.mount_path) + try: + self._setup_details_from_git_url(self.git_repo.remotes[0].url) + if not (self.branch or self.tag): + self.tag = self.branch = self.git_repo.active_branch.name + except IndexError: + self.org, self.repo, self.tag = os.path.split(self.mount_path)[-2:] + (self.branch,) + except TypeError: + # faced a "a detached symbolic reference as it points" in case you're in the middle of + # some git shenanigans + self.tag = self.branch = None + + def _setup_details_from_name_tag(self): + using_cached = bool(self.cache_key) + self.org, self.repo, self.tag = fetch_details_from_tag(self.name, using_cached) + self.tag = self.tag or self.branch + + def _setup_details_from_git_url(self, url=None): + return self.__setup_details_from_git(url) + + def __setup_details_from_git(self, url=None): + name = url if url else self.name + if name.startswith("git@") or name.startswith("ssh://"): + self.use_ssh = True + _first_part, _second_part = name.rsplit(":", 1) + self.remote_server = _first_part.split("@")[-1] + self.org, _repo = _second_part.rsplit("/", 1) + else: + protocal = "https://" if "https://" in name else "http://" + self.remote_server, self.org, _repo = name.replace(protocal, "").rsplit("/", 2) + + self.tag = self.branch + self.repo = _repo.split(".")[0] + + @property + def url(self): + if self.is_url or self.from_apps or self.on_disk: + return self.name + + if self.use_ssh: + return self.get_ssh_url() + + return self.get_http_url() + + def get_http_url(self): + return f"https://{self.remote_server}/{self.org}/{self.repo}.git" + + def get_ssh_url(self): + return f"git@{self.remote_server}:{self.org}/{self.repo}.git" + + 
+@lru_cache(maxsize=None) +class App(AppMeta): + def __init__( + self, + name: str, + branch: str = None, + bench: "Bench" = None, + soft_link: bool = False, + cache_key=None, + *args, + **kwargs, + ): + self.bench = bench + self.soft_link = soft_link + self.required_by = None + self.local_resolution = [] + self.cache_key = cache_key + self.pyproject = None + super().__init__(name, branch, *args, **kwargs) + + @step(title="Fetching App {repo}", success="App {repo} Fetched") + def get(self): + branch = f"--branch {self.tag}" if self.tag else "" + shallow = "--depth 1" if self.bench.shallow_clone else "" + + if not self.soft_link: + cmd = "git clone" + args = f"{self.url} {branch} {shallow} --origin upstream" + else: + cmd = "ln -s" + args = f"{self.name}" + + fetch_txt = f"Getting {self.repo}" + click.secho(fetch_txt, fg="yellow") + logger.log(fetch_txt) + + self.bench.run( + f"{cmd} {args}", + cwd=os.path.join(self.bench.name, "apps"), + ) + + @step(title="Archiving App {repo}", success="App {repo} Archived") + def remove(self, no_backup: bool = False): + active_app_path = os.path.join("apps", self.app_name) + + if no_backup: + if not os.path.islink(active_app_path): + shutil.rmtree(active_app_path) + else: + os.remove(active_app_path) + log(f"App deleted from {active_app_path}") + else: + archived_path = os.path.join("archived", "apps") + archived_name = get_available_folder_name( + f"{self.app_name}-{date.today()}", archived_path + ) + archived_app_path = os.path.join(archived_path, archived_name) + + shutil.move(active_app_path, archived_app_path) + log(f"App moved from {active_app_path} to {archived_app_path}") + + self.from_apps = False + self.on_disk = False + + @step(title="Installing App {repo}", success="App {repo} Installed") + def install( + self, + skip_assets=False, + verbose=False, + resolved=False, + restart_bench=True, + ignore_resolution=False, + using_cached=False, + ): + import bench.cli + from bench.utils.app import get_app_name + + 
self.validate_app_dependencies() + + verbose = bench.cli.verbose or verbose + app_name = get_app_name(self.bench.name, self.app_name) + if not resolved and self.app_name != "xhiveframework" and not ignore_resolution: + click.secho( + f"Ignoring dependencies of {self.name}. To install dependencies use --resolve-deps", + fg="yellow", + ) + + install_app( + app=app_name, + tag=self.tag, + bench_path=self.bench.name, + verbose=verbose, + skip_assets=skip_assets, + restart_bench=restart_bench, + resolution=self.local_resolution, + using_cached=using_cached, + ) + + @step(title="Cloning and installing {repo}", success="App {repo} Installed") + def install_resolved_apps(self, *args, **kwargs): + self.get() + self.install(*args, **kwargs, resolved=True) + + @step(title="Uninstalling App {repo}", success="App {repo} Uninstalled") + def uninstall(self): + self.bench.run(f"{self.bench.python} -m pip uninstall -y {self.name}") + + def _get_dependencies(self): + from bench.utils.app import get_required_deps, required_apps_from_hooks + + if self.on_disk: + required_deps = os.path.join(self.mount_path, self.app_name, "hooks.py") + try: + return required_apps_from_hooks(required_deps, local=True) + except IndexError: + return [] + try: + required_deps = get_required_deps(self.org, self.repo, self.tag or self.branch) + return required_apps_from_hooks(required_deps) + except Exception: + return [] + + def update_app_state(self): + from bench.bench import Bench + + bench = Bench(self.bench.name) + bench.apps.sync( + app_dir=self.app_name, + app_name=self.name, + branch=self.tag, + required=self.local_resolution, + ) + + def get_pyproject(self) -> Optional[dict]: + from bench.utils.app import get_pyproject + + if self.pyproject: + return self.pyproject + + apps_path = os.path.join(os.path.abspath(self.bench.name), "apps") + pyproject_path = os.path.join(apps_path, self.app_name, "pyproject.toml") + self.pyproject = get_pyproject(pyproject_path) + return self.pyproject + + def 
validate_app_dependencies(self, throw=False) -> None: + pyproject = self.get_pyproject() or {} + deps: Optional[dict] = ( + pyproject.get("tool", {}).get("bench", {}).get("xhiveframework-dependencies") + ) + if not deps: + return + + for dep, version in deps.items(): + validate_dependency(self, dep, version, throw=throw) + + """ + Get App Cache + + Since get-app affects only the `apps`, `env`, and `sites` + bench sub directories. If we assume deterministic builds + when get-app is called, the `apps/app_name` sub dir can be + cached. + + In subsequent builds this would save time by not having to: + - clone repository + - install frontend dependencies + - building frontend assets + as all of this is contained in the `apps/app_name` sub dir. + + Code that updates the `env` and `sites` subdirs still need + to be run. + """ + + def get_app_path(self) -> Path: + return Path(self.bench.name) / "apps" / self.app_name + + def get_app_cache_path(self, is_compressed=False) -> Path: + assert self.cache_key is not None + + cache_path = get_bench_cache_path("apps") + tarfile_name = get_cache_filename( + self.app_name, + self.cache_key, + is_compressed, + ) + return cache_path / tarfile_name + + def get_cached(self) -> bool: + if not self.cache_key: + return False + + cache_path = self.get_app_cache_path(False) + mode = "r" + + # Check if cache exists without gzip + if not cache_path.is_file(): + cache_path = self.get_app_cache_path(True) + mode = "r:gz" + + # Check if cache exists with gzip + if not cache_path.is_file(): + return False + + app_path = self.get_app_path() + if app_path.is_dir(): + shutil.rmtree(app_path) + + click.secho(f"Getting {self.app_name} from cache", fg="yellow") + with tarfile.open(cache_path, mode) as tar: + extraction_filter = get_app_cache_extract_filter(count_threshold=150_000) + try: + tar.extractall(app_path.parent, filter=extraction_filter) + except Exception: + message = f"Cache extraction failed for {self.app_name}, skipping cache" + 
click.secho(message, fg="yellow") + logger.exception(message) + shutil.rmtree(app_path) + return False + + return True + + def set_cache(self, compress_artifacts=False) -> bool: + if not self.cache_key: + return False + + app_path = self.get_app_path() + if not app_path.is_dir(): + return False + + cwd = os.getcwd() + cache_path = self.get_app_cache_path(compress_artifacts) + mode = "w:gz" if compress_artifacts else "w" + + message = f"Caching {self.app_name} app directory" + if compress_artifacts: + message += " (compressed)" + click.secho(message) + + self.prune_app_directory() + + success = False + os.chdir(app_path.parent) + try: + with tarfile.open(cache_path, mode) as tar: + tar.add(app_path.name) + success = True + except Exception: + log(f"Failed to cache {app_path}", level=3) + success = False + finally: + os.chdir(cwd) + return success + + def prune_app_directory(self): + app_path = self.get_app_path() + if can_xhiveframework_use_cached(self): + remove_unused_node_modules(app_path) + + +def coerce_url_to_name_if_possible(git_url: str, cache_key: str) -> str: + app_name = os.path.basename(git_url) + if can_get_cached(app_name, cache_key): + return app_name + return git_url + + +def can_get_cached(app_name: str, cache_key: str) -> bool: + """ + Used before App is initialized if passed `git_url` is a + file URL as opposed to the app name. + + If True then `git_url` can be coerced into the `app_name` and + checking local remote and fetching can be skipped while keeping + get-app command params the same. 
+ """ + cache_path = get_bench_cache_path("apps") + tarfile_path = cache_path / get_cache_filename( + app_name, + cache_key, + True, + ) + + if tarfile_path.is_file(): + return True + + tarfile_path = cache_path / get_cache_filename( + app_name, + cache_key, + False, + ) + + return tarfile_path.is_file() + + +def get_cache_filename(app_name: str, cache_key: str, is_compressed=False): + ext = "tgz" if is_compressed else "tar" + return f"{app_name}-{cache_key[:10]}.{ext}" + + +def can_xhiveframework_use_cached(app: App) -> bool: + min_xhiveframework = get_required_xhiveframework_version(app) + if not min_xhiveframework: + return False + + try: + return sv.Version(min_xhiveframework) in sv.SimpleSpec(">=15.12.0") + except ValueError: + # Passed value is not a version string, it's an expression + pass + + try: + """ + 15.12.0 is the first version to support USING_CACHED, + but there is no way to check the last version without + support. So it's not possible to have a ">" filter. + + Hence this excludes the first supported version. + """ + return sv.Version("15.12.0") not in sv.SimpleSpec(min_xhiveframework) + except ValueError: + click.secho(f"Invalid value found for xhiveframework version '{min_xhiveframework}'", fg="yellow") + # Invalid expression + return False + + +def validate_dependency(app: App, dep: str, req_version: str, throw=False) -> None: + dep_path = Path(app.bench.name) / "apps" / dep + if not dep_path.is_dir(): + click.secho(f"Required xhiveframework-dependency '{dep}' not found.", fg="yellow") + if throw: + sys.exit(1) + return + + dep_version = get_dep_version(dep, dep_path) + if not dep_version: + return + + if sv.Version(dep_version) not in sv.SimpleSpec(req_version): + click.secho( + f"Installed xhiveframework-dependency '{dep}' version '{dep_version}' " + f"does not satisfy required version '{req_version}'. 
" + f"App '{app.name}' might not work as expected.", + fg="yellow", + ) + if throw: + click.secho(f"Please install '{dep}{req_version}' first and retry", fg="red") + sys.exit(1) + + +def get_dep_version(dep: str, dep_path: Path) -> Optional[str]: + from bench.utils.app import get_pyproject + + dep_pp = get_pyproject(str(dep_path / "pyproject.toml")) + version = dep_pp.get("project", {}).get("version") + if version: + return version + + dinit_path = dep_path / dep / "__init__.py" + if not dinit_path.is_file(): + return None + + with dinit_path.open("r", encoding="utf-8") as dinit: + for line in dinit: + if not line.startswith("__version__ =") and not line.startswith("VERSION ="): + continue + + version = line.split("=")[1].strip().strip("\"'") + if version: + return version + else: + break + + return None + + +def get_required_xhiveframework_version(app: App) -> Optional[str]: + pyproject = app.get_pyproject() or {} + + # Reference: https://lab.membtech.com/xhiveframework/bench_new/issues/1524 + req_xhiveframework = ( + pyproject.get("tool", {}) + .get("bench", {}) + .get("xhiveframework-dependencies", {}) + .get("xhiveframework") + ) + + if not req_xhiveframework: + click.secho( + "Required xhiveframework version not set in pyproject.toml, " + "please refer: https://lab.membtech.com/xhiveframework/bench_new/issues/1524", + fg="yellow", + ) + + return req_xhiveframework + + +def remove_unused_node_modules(app_path: Path) -> None: + """ + Erring a bit the side of caution; since there is no explicit way + to check if node_modules are utilized, this function checks if Vite + is being used to build the frontend code. + + Since most popular Xhiveframework apps use Vite to build their frontends, + this method should suffice. + + Note: root package.json is ignored cause those usually belong to + apps that do not have a build step and so their node_modules are + utilized during runtime. 
+ """ + + for p in app_path.iterdir(): + if not p.is_dir(): + continue + + package_json = p / "package.json" + if not package_json.is_file(): + continue + + node_modules = p / "node_modules" + if not node_modules.is_dir(): + continue + + can_delete = False + with package_json.open("r", encoding="utf-8") as f: + package_json = json.loads(f.read()) + build_script = package_json.get("scripts", {}).get("build", "") + can_delete = "vite build" in build_script + + if can_delete: + shutil.rmtree(node_modules) + + +def make_resolution_plan(app: App, bench: "Bench"): + """ + decide what apps and versions to install and in what order + """ + resolution = OrderedDict() + resolution[app.app_name] = app + + for app_name in app._get_dependencies(): + dep_app = App(app_name, bench=bench) + is_valid_xhiveframework_branch(dep_app.url, dep_app.branch) + dep_app.required_by = app.name + if dep_app.app_name in resolution: + click.secho(f"{dep_app.app_name} is already resolved skipping", fg="yellow") + continue + resolution[dep_app.app_name] = dep_app + resolution.update(make_resolution_plan(dep_app, bench)) + app.local_resolution = [repo_name for repo_name, _ in reversed(resolution.items())] + return resolution + + +def get_excluded_apps(bench_path="."): + try: + with open(os.path.join(bench_path, "sites", "excluded_apps.txt")) as f: + return f.read().strip().split("\n") + except OSError: + return [] + + +def add_to_excluded_apps_txt(app, bench_path="."): + if app == "xhiveframework": + raise ValueError("Xhiveframework app cannot be excluded from update") + if app not in os.listdir("apps"): + raise ValueError(f"The app {app} does not exist") + apps = get_excluded_apps(bench_path=bench_path) + if app not in apps: + apps.append(app) + return write_excluded_apps_txt(apps, bench_path=bench_path) + + +def write_excluded_apps_txt(apps, bench_path="."): + with open(os.path.join(bench_path, "sites", "excluded_apps.txt"), "w") as f: + return f.write("\n".join(apps)) + + +def 
def remove_from_excluded_apps_txt(app, bench_path="."):
	# Remove `app` from excluded_apps.txt; silently does nothing if absent.
	apps = get_excluded_apps(bench_path=bench_path)
	if app in apps:
		apps.remove(app)
		return write_excluded_apps_txt(apps, bench_path=bench_path)


def get_app(
	git_url,
	branch=None,
	bench_path=".",
	skip_assets=False,
	verbose=False,
	overwrite=False,
	soft_link=False,
	init_bench=False,
	resolve_deps=False,
	cache_key=None,
	compress_artifacts=False,
):
	"""bench get-app clones a Xhiveframework App from remote (GitHub or any other git server),
	and installs it on the current bench. This also resolves dependencies based on the
	apps' required_apps defined in the hooks.py file.

	If the bench_path is not a bench directory, a new bench is created named using the
	git_url parameter.
	"""
	import bench as _bench
	import bench.cli as bench_cli
	from bench.bench import Bench
	from bench.utils.app import check_existing_dir

	# file:// URLs with a cache key may resolve straight to a cached app name
	if urlparse(git_url).scheme == "file" and cache_key:
		git_url = coerce_url_to_name_if_possible(git_url, cache_key)

	bench = Bench(bench_path)
	app = App(
		git_url, branch=branch, bench=bench, soft_link=soft_link, cache_key=cache_key
	)
	git_url = app.url
	repo_name = app.repo
	branch = app.tag
	bench_setup = False
	restart_bench = not init_bench  # a freshly initialized bench needs no restart
	xhiveframework_path, xhiveframework_branch = None, None

	if resolve_deps:
		resolution = make_resolution_plan(app, bench)
		click.secho("Following apps will be installed", fg="bright_blue")
		for idx, app in enumerate(reversed(resolution.values()), start=1):
			print(
				f"{idx}. {app.name} {f'(required by {app.required_by})' if app.required_by else ''}"
			)

		if "xhiveframework" in resolution:
			# Todo: Make xhiveframework a terminal dependency for all xhiveframework apps.
			xhiveframework_path, xhiveframework_branch = resolution["xhiveframework"].url, resolution["xhiveframework"].tag

	if not is_bench_directory(bench_path):
		if not init_bench:
			raise NotInBenchDirectoryError(
				f"{os.path.realpath(bench_path)} is not a valid bench directory. "
				"Run with --init-bench if you'd like to create a Bench too."
			)

		from bench.utils.system import init

		# create a new bench named after the app's repo
		bench_path = get_available_folder_name(f"{app.repo}-bench", bench_path)
		init(
			path=bench_path,
			xhiveframework_path=xhiveframework_path,
			xhiveframework_branch=xhiveframework_branch or branch,
		)
		os.chdir(bench_path)
		bench_setup = True

	if bench_setup and bench_cli.from_command_line and bench_cli.dynamic_feed:
		_bench.LOG_BUFFER.append(
			{
				"message": f"Fetching App {repo_name}",
				"prefix": click.style("⏼", fg="bright_yellow"),
				"is_parent": True,
				"color": None,
			}
		)

	if resolve_deps:
		install_resolved_deps(
			bench,
			resolution,
			bench_path=bench_path,
			skip_assets=skip_assets,
			verbose=verbose,
		)
		return

	# cached tarball available: extract + install without cloning
	if app.get_cached():
		app.install(
			verbose=verbose,
			skip_assets=skip_assets,
			restart_bench=restart_bench,
			using_cached=True,
		)
		return

	dir_already_exists, cloned_path = check_existing_dir(bench_path, repo_name)
	to_clone = not dir_already_exists

	# application directory already exists
	# prompt user to overwrite it
	if dir_already_exists and (
		overwrite
		or click.confirm(
			f"A directory for the application '{repo_name}' already exists. "
			"Do you want to continue and overwrite it?"
		)
	):
		app.remove()
		to_clone = True

	if to_clone:
		app.get()

	if (
		to_clone
		or overwrite
		or click.confirm("Do you want to reinstall the existing application?")
	):
		app.install(verbose=verbose, skip_assets=skip_assets, restart_bench=restart_bench)

	# store a cache tarball for faster future get-app runs
	app.set_cache(compress_artifacts)


def install_resolved_deps(
	bench,
	resolution,
	bench_path=".",
	skip_assets=False,
	verbose=False,
):
	from bench.utils.app import check_existing_dir

	if "xhiveframework" in resolution:
		# Terminal dependency
		del resolution["xhiveframework"]

	# install in reverse resolution order so dependencies come first
	for repo_name, app in reversed(resolution.items()):
		existing_dir, path_to_app = check_existing_dir(bench_path, repo_name)
		if existing_dir:
			is_compatible = False

			# prefer the recorded branch from apps.json; fall back to git
			try:
				installed_branch = bench.apps.states[repo_name]["resolution"]["branch"].strip()
			except Exception:
				installed_branch = (
					subprocess.check_output(
						"git rev-parse --abbrev-ref HEAD", shell=True, cwd=path_to_app
					)
					.decode("utf-8")
					.rstrip()
				)
			try:
				if app.tag is None:
					# no tag requested: compatible iff the checked-out branch
					# is the remote's default branch
					current_remote = (
						subprocess.check_output(
							f"git config branch.{installed_branch}.remote", shell=True, cwd=path_to_app
						)
						.decode("utf-8")
						.rstrip()
					)

					default_branch = (
						subprocess.check_output(
							f"git symbolic-ref refs/remotes/{current_remote}/HEAD",
							shell=True,
							cwd=path_to_app,
						)
						.decode("utf-8")
						.rsplit("/")[-1]
						.strip()
					)
					is_compatible = default_branch == installed_branch
				else:
					is_compatible = installed_branch == app.tag
			except Exception:
				is_compatible = False

			prefix = "C" if is_compatible else "Inc"
			click.secho(
				f"{prefix}ompatible version of {repo_name} is already installed",
				fg="green" if is_compatible else "red",
			)
			app.update_app_state()
			if click.confirm(
				f"Do you wish to clone and install the already installed {prefix}ompatible app"
			):
				click.secho(f"Removing installed app {app.name}", fg="yellow")
				shutil.rmtree(path_to_app)
			else:
				continue
		app.install_resolved_apps(skip_assets=skip_assets, verbose=verbose)


def new_app(app, no_git=None, bench_path="."):
	# Scaffold a new app via `make-app` and install it on this bench.
	if bench.XHIVEFRAMEWORK_VERSION in (0, None):
		raise NotInBenchDirectoryError(
			f"{os.path.realpath(bench_path)} is not a valid bench directory."
		)

	# For backwards compatibility
	app = app.lower().replace(" ", "_").replace("-", "_")
	if app[0].isdigit() or "." in app:
		click.secho(
			"App names cannot start with numbers(digits) or have dot(.) in them", fg="red"
		)
		return

	apps = os.path.abspath(os.path.join(bench_path, "apps"))
	args = ["make-app", apps, app]
	if no_git:
		# --no-git flag is only understood by framework v14+
		if bench.XHIVEFRAMEWORK_VERSION < 14:
			click.secho("Xhiveframework v14 or greater is needed for '--no-git' flag", fg="red")
			return
		args.append(no_git)

	logger.log(f"creating new app {app}")
	run_xhiveframework_cmd(*args, bench_path=bench_path)
	install_app(app, bench_path=bench_path)


def install_app(
	app,
	tag=None,
	bench_path=".",
	verbose=False,
	no_cache=False,
	restart_bench=True,
	skip_assets=False,
	resolution=UNSET_ARG,
	using_cached=False,
):
	# pip-install the app into the bench env, install node deps, sync
	# apps.json state, build assets and (optionally) reload processes.
	import bench.cli as bench_cli
	from bench.bench import Bench

	install_text = f"Installing {app}"
	click.secho(install_text, fg="yellow")
	logger.log(install_text)

	if resolution == UNSET_ARG:
		resolution = []

	bench = Bench(bench_path)
	conf = bench.conf

	verbose = bench_cli.verbose or verbose
	quiet_flag = "" if verbose else "--quiet"
	cache_flag = "--no-cache-dir" if no_cache else ""

	app_path = os.path.realpath(os.path.join(bench_path, "apps", app))

	# editable install so local changes take effect without reinstall
	bench.run(
		f"{bench.python} -m pip install {quiet_flag} --upgrade -e {app_path} {cache_flag}"
	)

	if conf.get("developer_mode"):
		install_python_dev_dependencies(apps=app, bench_path=bench_path, verbose=verbose)

	# cached installs ship prebuilt assets; node deps are not needed then
	if not using_cached and os.path.exists(os.path.join(app_path, "package.json")):
		yarn_install = "yarn install --check-files"
		if verbose:
			yarn_install += " --verbose"
		bench.run(yarn_install, cwd=app_path)

	bench.apps.sync(app_name=app, required=resolution, branch=tag, app_dir=app_path)

	if not skip_assets:
		build_assets(bench_path=bench_path, app=app, using_cached=using_cached)

	if restart_bench:
		# Avoiding exceptions here as production might not be set-up
		# OR we might just be generating docker images.
		bench.reload(_raise=False)


def pull_apps(apps=None, bench_path=".", reset=False):
	"""Pull all (or the given) apps, first verifying that none of them
	have uncommitted local changes unless `reset` is set."""
	from bench.bench import Bench
	from bench.utils.app import get_current_branch, get_remote

	bench = Bench(bench_path)
	rebase = "--rebase" if bench.conf.get("rebase_on_pull") else ""
	apps = apps or bench.apps
	excluded_apps = bench.excluded_apps

	# check for local changes
	if not reset:
		for app in apps:
			if app in excluded_apps:
				print(f"Skipping reset for app {app}")
				continue
			app_dir = get_repo_dir(app, bench_path=bench_path)
			if os.path.exists(os.path.join(app_dir, ".git")):
				out = subprocess.check_output("git status", shell=True, cwd=app_dir)
				out = out.decode("utf-8")
				if not re.search(r"nothing to commit, working (directory|tree) clean", out):
					print(
						f"""

Cannot proceed with update: You have local changes in app "{app}" that are not committed.

Here are your choices:

1. Merge the {app} app manually with "git pull" / "git pull --rebase" and fix conflicts.
2. Temporarily remove your changes with "git stash" or discard them completely
	with "bench update --reset" or for individual repositries "git reset --hard"
3. If your changes are helpful for others, send in a pull request via GitHub and
	wait for them to be merged in the core."""
					)
					sys.exit(1)

	for app in apps:
		if app in excluded_apps:
			print(f"Skipping pull for app {app}")
			continue
		app_dir = get_repo_dir(app, bench_path=bench_path)
		if os.path.exists(os.path.join(app_dir, ".git")):
			remote = get_remote(app)
			if not remote:
				# remote is False, i.e. remote doesn't exist, add the app to excluded_apps.txt
				add_to_excluded_apps_txt(app, bench_path=bench_path)
				print(
					f"Skipping pull for app {app}, since remote doesn't exist, and"
					" adding it to excluded apps"
				)
				continue

			if not bench.conf.get("shallow_clone") or not reset:
				is_shallow = os.path.exists(os.path.join(app_dir, ".git", "shallow"))
				if is_shallow:
					s = " to safely pull remote changes." if not reset else ""
					print(f"Unshallowing {app}{s}")
					bench.run(f"git fetch {remote} --unshallow", cwd=app_dir)

			branch = get_current_branch(app, bench_path=bench_path)
			logger.log(f"pulling {app}")
			if reset:
				reset_cmd = f"git reset --hard {remote}/{branch}"
				if bench.conf.get("shallow_clone"):
					# keep the clone shallow: fetch only the tip, then prune history
					bench.run(f"git fetch --depth=1 --no-tags {remote} {branch}", cwd=app_dir)
					bench.run(reset_cmd, cwd=app_dir)
					bench.run("git reflog expire --all", cwd=app_dir)
					bench.run("git gc --prune=all", cwd=app_dir)
				else:
					bench.run("git fetch --all", cwd=app_dir)
					bench.run(reset_cmd, cwd=app_dir)
			else:
				bench.run(f"git pull {rebase} {remote} {branch}", cwd=app_dir)
			# stale .pyc files can shadow removed modules after a pull
			bench.run('find . -name "*.pyc" -delete', cwd=app_dir)
-name "*.pyc" -delete', cwd=app_dir) + + +def use_rq(bench_path): + bench_path = os.path.abspath(bench_path) + celery_app = os.path.join(bench_path, "apps", "xhiveframework", "xhiveframework", "celery_app.py") + return not os.path.exists(celery_app) + + +def get_repo_dir(app, bench_path="."): + return os.path.join(bench_path, "apps", app) + + +def is_git_repo(app_path): + try: + git.Repo(app_path, search_parent_directories=False) + return True + except git.exc.InvalidGitRepositoryError: + return False + + +def install_apps_from_path(path, bench_path="."): + apps = get_apps_json(path) + for app in apps: + get_app( + app["url"], + branch=app.get("branch"), + bench_path=bench_path, + skip_assets=True, + ) + + +def get_apps_json(path): + import requests + + if path.startswith("http"): + r = requests.get(path) + return r.json() + + with open(path) as f: + return json.load(f) diff --git a/bench/bench.py b/bench/bench.py new file mode 100644 index 0000000..385b6bd --- /dev/null +++ b/bench/bench.py @@ -0,0 +1,498 @@ +# imports - standard imports +import subprocess +from functools import lru_cache +import os +import shutil +import json +import sys +import logging +from typing import List, MutableSequence, TYPE_CHECKING, Union + +# imports - module imports +import bench +from bench.exceptions import AppNotInstalledError, InvalidRemoteException +from bench.config.common_site_config import setup_config +from bench.utils import ( + UNSET_ARG, + paths_in_bench, + exec_cmd, + is_bench_directory, + is_xhiveframework_app, + get_cmd_output, + get_git_version, + log, + run_xhiveframework_cmd, +) +from bench.utils.bench import ( + validate_app_installed_on_sites, + restart_supervisor_processes, + restart_systemd_processes, + restart_process_manager, + remove_backups_crontab, + get_venv_path, + get_env_cmd, +) +from bench.utils.render import job, step +from bench.utils.app import get_current_version +from bench.app import is_git_repo + + +if TYPE_CHECKING: + from bench.app import App 
+ +logger = logging.getLogger(bench.PROJECT_NAME) + + +class Base: + def run(self, cmd, cwd=None, _raise=True): + return exec_cmd(cmd, cwd=cwd or self.cwd, _raise=_raise) + + +class Validator: + def validate_app_uninstall(self, app): + if app not in self.apps: + raise AppNotInstalledError(f"No app named {app}") + validate_app_installed_on_sites(app, bench_path=self.name) + + +@lru_cache(maxsize=None) +class Bench(Base, Validator): + def __init__(self, path): + self.name = path + self.cwd = os.path.abspath(path) + self.exists = is_bench_directory(self.name) + + self.setup = BenchSetup(self) + self.teardown = BenchTearDown(self) + self.apps = BenchApps(self) + + self.apps_txt = os.path.join(self.name, "sites", "apps.txt") + self.excluded_apps_txt = os.path.join(self.name, "sites", "excluded_apps.txt") + + @property + def python(self) -> str: + return get_env_cmd("python", bench_path=self.name) + + @property + def shallow_clone(self) -> bool: + config = self.conf + + if config: + if config.get("release_bench") or not config.get("shallow_clone"): + return False + + return get_git_version() > 1.9 + + @property + def excluded_apps(self) -> List: + try: + with open(self.excluded_apps_txt) as f: + return f.read().strip().split("\n") + except Exception: + return [] + + @property + def sites(self) -> List: + return [ + path + for path in os.listdir(os.path.join(self.name, "sites")) + if os.path.exists(os.path.join("sites", path, "site_config.json")) + ] + + @property + def conf(self): + from bench.config.common_site_config import get_config + + return get_config(self.name) + + def init(self): + self.setup.dirs() + self.setup.env() + self.setup.backups() + + def drop(self): + self.teardown.backups() + self.teardown.dirs() + + def install(self, app, branch=None): + from bench.app import App + + app = App(app, branch=branch) + self.apps.append(app) + self.apps.sync() + + def uninstall(self, app, no_backup=False, force=False): + from bench.app import App + + if not force: + 
self.validate_app_uninstall(app) + try: + self.apps.remove(App(app, bench=self, to_clone=False), no_backup=no_backup) + except InvalidRemoteException: + if not force: + raise + + self.apps.sync() + # self.build() - removed because it seems unnecessary + self.reload(_raise=False) + + @step(title="Building Bench Assets", success="Bench Assets Built") + def build(self): + # build assets & stuff + run_xhiveframework_cmd("build", bench_path=self.name) + + @step(title="Reloading Bench Processes", success="Bench Processes Reloaded") + def reload(self, web=False, supervisor=True, systemd=True, _raise=True): + """If web is True, only web workers are restarted""" + conf = self.conf + + if conf.get("developer_mode"): + restart_process_manager(bench_path=self.name, web_workers=web) + if supervisor or conf.get("restart_supervisor_on_update"): + restart_supervisor_processes(bench_path=self.name, web_workers=web, _raise=_raise) + if systemd and conf.get("restart_systemd_on_update"): + restart_systemd_processes(bench_path=self.name, web_workers=web, _raise=_raise) + + def get_installed_apps(self) -> List: + """Returns list of installed apps on bench, not in excluded_apps.txt""" + try: + installed_packages = get_cmd_output(f"{self.python} -m pip freeze", cwd=self.name) + except Exception: + installed_packages = [] + + return [ + app + for app in self.apps + if app not in self.excluded_apps and app in installed_packages + ] + + +class BenchApps(MutableSequence): + def __init__(self, bench: Bench): + self.bench = bench + self.states_path = os.path.join(self.bench.name, "sites", "apps.json") + self.apps_path = os.path.join(self.bench.name, "apps") + self.initialize_apps() + self.set_states() + + def set_states(self): + try: + with open(self.states_path) as f: + self.states = json.loads(f.read() or "{}") + except FileNotFoundError: + self.states = {} + + def update_apps_states( + self, + app_dir: str = None, + app_name: Union[str, None] = None, + branch: Union[str, None] = None, + 
required: List = UNSET_ARG, + ): + if required == UNSET_ARG: + required = [] + if self.apps and not os.path.exists(self.states_path): + # idx according to apps listed in apps.txt (backwards compatibility) + # Keeping xhiveframework as the first app. + if "xhiveframework" in self.apps: + self.apps.remove("xhiveframework") + self.apps.insert(0, "xhiveframework") + with open(self.bench.apps_txt, "w") as f: + f.write("\n".join(self.apps)) + + print("Found existing apps updating states...") + for idx, app in enumerate(self.apps, start=1): + self.states[app] = { + "resolution": {"commit_hash": None, "branch": None}, + "required": required, + "idx": idx, + "version": get_current_version(app, self.bench.name), + } + + apps_to_remove = [] + for app in self.states: + if app not in self.apps: + apps_to_remove.append(app) + + for app in apps_to_remove: + del self.states[app] + + if app_name and not app_dir: + app_dir = app_name + + if app_name and app_name not in self.states: + version = get_current_version(app_name, self.bench.name) + + app_dir = os.path.join(self.apps_path, app_dir) + is_repo = is_git_repo(app_dir) + if is_repo: + if not branch: + branch = ( + subprocess.check_output( + "git rev-parse --abbrev-ref HEAD", shell=True, cwd=app_dir + ) + .decode("utf-8") + .rstrip() + ) + + commit_hash = ( + subprocess.check_output(f"git rev-parse {branch}", shell=True, cwd=app_dir) + .decode("utf-8") + .rstrip() + ) + + self.states[app_name] = { + "is_repo": is_repo, + "resolution": "not a repo" + if not is_repo + else {"commit_hash": commit_hash, "branch": branch}, + "required": required, + "idx": len(self.states) + 1, + "version": version, + } + + with open(self.states_path, "w") as f: + f.write(json.dumps(self.states, indent=4)) + + def sync( + self, + app_name: Union[str, None] = None, + app_dir: Union[str, None] = None, + branch: Union[str, None] = None, + required: List = UNSET_ARG, + ): + if required == UNSET_ARG: + required = [] + self.initialize_apps() + + with 
open(self.bench.apps_txt, "w") as f: + f.write("\n".join(self.apps)) + + self.update_apps_states( + app_name=app_name, app_dir=app_dir, branch=branch, required=required + ) + + def initialize_apps(self): + try: + self.apps = [ + x + for x in os.listdir(os.path.join(self.bench.name, "apps")) + if is_xhiveframework_app(os.path.join(self.bench.name, "apps", x)) + ] + self.apps.remove("xhiveframework") + self.apps.insert(0, "xhiveframework") + except FileNotFoundError: + self.apps = [] + + def __getitem__(self, key): + """retrieves an item by its index, key""" + return self.apps[key] + + def __setitem__(self, key, value): + """set the item at index, key, to value""" + # should probably not be allowed + # self.apps[key] = value + raise NotImplementedError + + def __delitem__(self, key): + """removes the item at index, key""" + # TODO: uninstall and delete app from bench + del self.apps[key] + + def __len__(self): + return len(self.apps) + + def insert(self, key, value): + """add an item, value, at index, key.""" + # TODO: fetch and install app to bench + self.apps.insert(key, value) + + def add(self, app: "App"): + app.get() + app.install() + super().append(app.app_name) + self.apps.sort() + + def remove(self, app: "App", no_backup: bool = False): + app.uninstall() + app.remove(no_backup=no_backup) + super().remove(app.app_name) + + def append(self, app: "App"): + return self.add(app) + + def __repr__(self): + return self.__str__() + + def __str__(self): + return str([x for x in self.apps]) + + +class BenchSetup(Base): + def __init__(self, bench: Bench): + self.bench = bench + self.cwd = self.bench.cwd + + @step(title="Setting Up Directories", success="Directories Set Up") + def dirs(self): + os.makedirs(self.bench.name, exist_ok=True) + + for dirname in paths_in_bench: + os.makedirs(os.path.join(self.bench.name, dirname), exist_ok=True) + + @step(title="Setting Up Environment", success="Environment Set Up") + def env(self, python="python3"): + """Setup env folder + - 
create env if not exists + - upgrade env pip + - install xhiveframework python dependencies + """ + import bench.cli + import click + + verbose = bench.cli.verbose + + click.secho("Setting Up Environment", fg="yellow") + + xhiveframework = os.path.join(self.bench.name, "apps", "xhiveframework") + quiet_flag = "" if verbose else "--quiet" + + if not os.path.exists(self.bench.python): + venv = get_venv_path(verbose=verbose, python=python) + self.run(f"{venv} env", cwd=self.bench.name) + + self.pip() + self.wheel() + + if os.path.exists(xhiveframework): + self.run( + f"{self.bench.python} -m pip install {quiet_flag} --upgrade -e {xhiveframework}", + cwd=self.bench.name, + ) + + @step(title="Setting Up Bench Config", success="Bench Config Set Up") + def config(self, redis=True, procfile=True, additional_config=None): + """Setup config folder + - create pids folder + - generate sites/common_site_config.json + """ + setup_config(self.bench.name, additional_config=additional_config) + + if redis: + from bench.config.redis import generate_config + + generate_config(self.bench.name) + + if procfile: + from bench.config.procfile import setup_procfile + + setup_procfile(self.bench.name, skip_redis=not redis) + + @step(title="Updating pip", success="Updated pip") + def pip(self, verbose=False): + """Updates env pip; assumes that env is setup""" + import bench.cli + + verbose = bench.cli.verbose or verbose + quiet_flag = "" if verbose else "--quiet" + + return self.run( + f"{self.bench.python} -m pip install {quiet_flag} --upgrade pip", cwd=self.bench.name + ) + + @step(title="Installing wheel", success="Installed wheel") + def wheel(self, verbose=False): + """Wheel is required for building old setup.py packages. 
+ ref: https://github.com/pypa/pip/issues/8559""" + import bench.cli + + verbose = bench.cli.verbose or verbose + quiet_flag = "" if verbose else "--quiet" + + return self.run( + f"{self.bench.python} -m pip install {quiet_flag} wheel", cwd=self.bench.name + ) + + def logging(self): + from bench.utils import setup_logging + + return setup_logging(bench_path=self.bench.name) + + @step(title="Setting Up Bench Patches", success="Bench Patches Set Up") + def patches(self): + shutil.copy( + os.path.join(os.path.dirname(os.path.abspath(__file__)), "patches", "patches.txt"), + os.path.join(self.bench.name, "patches.txt"), + ) + + @step(title="Setting Up Backups Cronjob", success="Backups Cronjob Set Up") + def backups(self): + # TODO: to something better for logging data? - maybe a wrapper that auto-logs with more context + logger.log("setting up backups") + + from crontab import CronTab + + bench_dir = os.path.abspath(self.bench.name) + user = self.bench.conf.get("xhiveframework_user") + logfile = os.path.join(bench_dir, "logs", "backup.log") + system_crontab = CronTab(user=user) + backup_command = f"cd {bench_dir} && {sys.argv[0]} --verbose --site all backup" + job_command = f"{backup_command} >> {logfile} 2>&1" + + if job_command not in str(system_crontab): + job = system_crontab.new( + command=job_command, comment="bench auto backups set for every 6 hours" + ) + job.every(6).hours() + system_crontab.write() + + logger.log("backups were set up") + + @job(title="Setting Up Bench Dependencies", success="Bench Dependencies Set Up") + def requirements(self, apps=None): + """Install and upgrade specified / all installed apps on given Bench""" + from bench.app import App + + apps = apps or self.bench.apps + + self.pip() + + print(f"Installing {len(apps)} applications...") + + for app in apps: + path_to_app = os.path.join(self.bench.name, "apps", app) + app = App(path_to_app, bench=self.bench, to_clone=False).install( + skip_assets=True, restart_bench=False, 
ignore_resolution=True + ) + + def python(self, apps=None): + """Install and upgrade Python dependencies for specified / all installed apps on given Bench""" + import bench.cli + + apps = apps or self.bench.apps + + quiet_flag = "" if bench.cli.verbose else "--quiet" + + self.pip() + + for app in apps: + app_path = os.path.join(self.bench.name, "apps", app) + log(f"\nInstalling python dependencies for {app}", level=3, no_log=True) + self.run(f"{self.bench.python} -m pip install {quiet_flag} --upgrade -e {app_path}") + + def node(self, apps=None): + """Install and upgrade Node dependencies for specified / all apps on given Bench""" + from bench.utils.bench import update_node_packages + + return update_node_packages(bench_path=self.bench.name, apps=apps) + + +class BenchTearDown: + def __init__(self, bench): + self.bench = bench + + def backups(self): + remove_backups_crontab(self.bench.name) + + def dirs(self): + shutil.rmtree(self.bench.name) diff --git a/bench/cli.py b/bench/cli.py new file mode 100755 index 0000000..e5f6025 --- /dev/null +++ b/bench/cli.py @@ -0,0 +1,267 @@ +# imports - standard imports +import atexit +from contextlib import contextmanager +from logging import Logger +import os +import pwd +import sys + +# imports - third party imports +import click + +# imports - module imports +import bench +from bench.bench import Bench +from bench.commands import bench_command +from bench.config.common_site_config import get_config +from bench.utils import ( + check_latest_version, + drop_privileges, + find_parent_bench, + get_env_xhiveframework_commands, + get_cmd_output, + is_bench_directory, + is_dist_editable, + is_root, + log, + setup_logging, + get_cmd_from_sysargv, +) +from bench.utils.bench import get_env_cmd +from importlib.util import find_spec + + +# these variables are used to show dynamic outputs on the terminal +dynamic_feed = False +verbose = False +is_envvar_warn_set = None +from_command_line = False # set when commands are executed via the 
CLI +bench.LOG_BUFFER = [] + +change_uid_msg = "You should not run this command as root" +src = os.path.dirname(__file__) +SKIP_MODULE_TRACEBACK = ("click",) + + +@contextmanager +def execute_cmd(check_for_update=True, command: str = None, logger: Logger = None): + if check_for_update: + atexit.register(check_latest_version) + + try: + yield + except BaseException as e: + return_code = getattr(e, "code", 1) + + if isinstance(e, Exception): + click.secho(f"ERROR: {e}", fg="red") + + if return_code: + logger.warning(f"{command} executed with exit code {return_code}") + + raise e + + +def cli(): + setup_clear_cache() + global from_command_line, bench_config, is_envvar_warn_set, verbose + + from_command_line = True + command = " ".join(sys.argv) + argv = set(sys.argv) + is_envvar_warn_set = not (os.environ.get("BENCH_DEVELOPER") or os.environ.get("CI")) + is_cli_command = len(sys.argv) > 1 and not argv.intersection({"src", "--version"}) + cmd_from_sys = get_cmd_from_sysargv() + + if "--verbose" in argv: + verbose = True + + change_working_directory() + logger = setup_logging() + logger.info(command) + + bench_config = get_config(".") + + if is_cli_command: + check_uid() + change_uid() + change_dir() + + if ( + is_envvar_warn_set + and is_cli_command + and not bench_config.get("developer_mode") + and is_dist_editable(bench.PROJECT_NAME) + ): + log( + "bench is installed in editable mode!\n\nThis is not the recommended mode" + " of installation for production. 
Instead, install the package from PyPI" + " with: `pip install xhiveframework-bench`\n", + level=3, + ) + + in_bench = is_bench_directory() + + if ( + not in_bench + and len(sys.argv) > 1 + and not argv.intersection( + {"init", "find", "src", "drop", "get", "get-app", "--version"} + ) + and not cmd_requires_root() + ): + log("Command not being executed in bench directory", level=3) + + if len(sys.argv) == 1 or sys.argv[1] == "--help": + print(click.Context(bench_command).get_help()) + if in_bench: + print(get_xhiveframework_help()) + return + + _opts = [x.opts + x.secondary_opts for x in bench_command.params] + opts = {item for sublist in _opts for item in sublist} + + setup_exception_handler() + + # handle usages like `--use-feature='feat-x'` and `--use-feature 'feat-x'` + if cmd_from_sys and cmd_from_sys.split("=", 1)[0].strip() in opts: + bench_command() + + if cmd_from_sys in bench_command.commands: + with execute_cmd(check_for_update=is_cli_command, command=command, logger=logger): + bench_command() + + if in_bench: + xhiveframework_cmd() + + bench_command() + + +def check_uid(): + if cmd_requires_root() and not is_root(): + log("superuser privileges required for this command", level=3) + sys.exit(1) + + +def cmd_requires_root(): + if len(sys.argv) > 2 and sys.argv[2] in ( + "production", + "sudoers", + "lets-encrypt", + "fonts", + "print", + "firewall", + "ssh-port", + "role", + "fail2ban", + "wildcard-ssl", + ): + return True + if len(sys.argv) >= 2 and sys.argv[1] in ( + "patch", + "renew-lets-encrypt", + "disable-production", + ): + return True + if len(sys.argv) > 2 and sys.argv[1] in ("install"): + return True + + +def change_dir(): + if os.path.exists("config.json") or "init" in sys.argv: + return + dir_path_file = "/etc/xhiveframework_bench_dir" + if os.path.exists(dir_path_file): + with open(dir_path_file) as f: + dir_path = f.read().strip() + if os.path.exists(dir_path): + os.chdir(dir_path) + + +def change_uid(): + if is_root() and not 
def app_cmd(bench_path="."):
	"""Replace this process with xhiveframework's bench_helper, forwarding our args."""
	python = get_env_cmd("python", bench_path=bench_path)
	os.chdir(os.path.join(bench_path, "sites"))
	os.execv(python, [python, "-m", "xhiveframework.utils.bench_helper", *sys.argv[1:]])


def xhiveframework_cmd(bench_path="."):
	"""Replace this process with the `xhiveframework` CLI inside the bench env."""
	python = get_env_cmd("python", bench_path=bench_path)
	os.chdir(os.path.join(bench_path, "sites"))
	os.execv(
		python,
		[python, "-m", "xhiveframework.utils.bench_helper", "xhiveframework", *sys.argv[1:]],
	)


def get_xhiveframework_commands():
	"""Return the set of framework commands, or an empty set outside a bench."""
	if not is_bench_directory():
		return set()
	return set(get_env_xhiveframework_commands())


def get_xhiveframework_help(bench_path="."):
	"""Return the framework's command help text, or "" when it cannot be fetched."""
	python = get_env_cmd("python", bench_path=bench_path)
	sites_path = os.path.join(bench_path, "sites")
	try:
		output = get_cmd_output(
			f"{python} -m xhiveframework.utils.bench_helper get-xhiveframework-help", cwd=sites_path
		)
		# Keep only the part after the click "Commands:" header.
		return "\n\nFramework commands:\n" + output.split("Commands:")[1]
	except Exception:
		# Best effort: any failure (command error, unexpected output) yields "".
		return ""


def change_working_directory():
	"""Allow bench commands to be run from anywhere inside a bench directory."""
	current_dir = os.path.abspath(".")
	parent_bench = find_parent_bench(current_dir)
	bench.current_path = os.getcwd()
	bench.updated_path = parent_bench

	if parent_bench:
		os.chdir(parent_bench)


def setup_clear_cache():
	"""Monkey-patch os.chdir so cached bench/env lookups are invalidated on cd."""
	from copy import copy

	original_chdir = copy(os.chdir)

	def patched_chdir(*args, **kwargs):
		Bench.cache_clear()
		get_env_cmd.cache_clear()
		return original_chdir(*args, **kwargs)

	os.chdir = patched_chdir
def setup_exception_handler():
	"""Install a custom sys.excepthook for bench.

	For CommandFailedError the traceback is printed with frames originating
	from the modules listed in SKIP_MODULE_TRACEBACK filtered out; any other
	exception falls through to the default hook.
	"""
	from traceback import format_exception

	from bench.exceptions import CommandFailedError

	def handle_exception(exc_type, exc_info, tb):
		if exc_type == CommandFailedError:
			print("".join(generate_exc(exc_type, exc_info, tb)))
		else:
			sys.__excepthook__(exc_type, exc_info, tb)

	def generate_exc(exc_type, exc_info, tb):
		# Directories of the modules whose frames should be hidden.
		TB_SKIP = [
			os.path.dirname(find_spec(module).origin) for module in SKIP_MODULE_TRACEBACK
		]

		for tb_line in format_exception(exc_type, exc_info, tb):
			# Fix: the previous nested loop yielded a line once for every
			# skip-module whose path did NOT appear in it, duplicating every
			# line as soon as SKIP_MODULE_TRACEBACK had more than one entry.
			# Emit each line at most once, only when no skip path matches.
			if not any(skip_path in tb_line for skip_path in TB_SKIP):
				yield tb_line

	sys.excepthook = handle_exception


# ---- bench/commands/__init__.py ----

# imports - third party imports
import click

# imports - module imports
from bench.utils.cli import (
	MultiCommandGroup,
	print_bench_version,
	use_experimental_feature,
	setup_verbosity,
)


@click.group(cls=MultiCommandGroup)
@click.option(
	"--version",
	is_flag=True,
	is_eager=True,
	callback=print_bench_version,
	expose_value=False,
)
@click.option(
	"--use-feature",
	is_eager=True,
	callback=use_experimental_feature,
	expose_value=False,
)
@click.option(
	"-v",
	"--verbose",
	is_flag=True,
	callback=setup_verbosity,
	expose_value=False,
)
def bench_command(bench_path="."):
	"""Root click group for the bench CLI."""
	import bench

	bench.set_xhiveframework_version(bench_path=bench_path)


from bench.commands.make import (
	drop,
	exclude_app_for_update,
	get_app,
	include_app_for_update,
	init,
	new_app,
	pip,
	remove_app,
	validate_dependencies,
)

bench_command.add_command(init)
bench_command.add_command(drop)
bench_command.add_command(get_app)
bench_command.add_command(new_app)
bench_command.add_command(remove_app)
bench_command.add_command(exclude_app_for_update)
bench_command.add_command(include_app_for_update)
bench_command.add_command(pip)
bench_command.add_command(validate_dependencies)


from bench.commands.update import (
	retry_upgrade,
	switch_to_branch,
	switch_to_develop,
	update,
)

bench_command.add_command(update)
+bench_command.add_command(retry_upgrade) +bench_command.add_command(switch_to_branch) +bench_command.add_command(switch_to_develop) + + +from bench.commands.utils import ( + app_cache_helper, + backup_all_sites, + bench_src, + disable_production, + download_translations, + find_benches, + migrate_env, + renew_lets_encrypt, + restart, + set_mariadb_host, + set_nginx_port, + set_redis_cache_host, + set_redis_queue_host, + set_redis_socketio_host, + set_ssl_certificate, + set_ssl_certificate_key, + set_url_root, + start, +) + +bench_command.add_command(start) +bench_command.add_command(restart) +bench_command.add_command(set_nginx_port) +bench_command.add_command(set_ssl_certificate) +bench_command.add_command(set_ssl_certificate_key) +bench_command.add_command(set_url_root) +bench_command.add_command(set_mariadb_host) +bench_command.add_command(set_redis_cache_host) +bench_command.add_command(set_redis_queue_host) +bench_command.add_command(set_redis_socketio_host) +bench_command.add_command(download_translations) +bench_command.add_command(backup_all_sites) +bench_command.add_command(renew_lets_encrypt) +bench_command.add_command(disable_production) +bench_command.add_command(bench_src) +bench_command.add_command(find_benches) +bench_command.add_command(migrate_env) +bench_command.add_command(app_cache_helper) + +from bench.commands.setup import setup + +bench_command.add_command(setup) + + +from bench.commands.config import config + +bench_command.add_command(config) + +from bench.commands.git import remote_reset_url, remote_set_url, remote_urls + +bench_command.add_command(remote_set_url) +bench_command.add_command(remote_reset_url) +bench_command.add_command(remote_urls) + +from bench.commands.install import install + +bench_command.add_command(install) diff --git a/bench/commands/config.py b/bench/commands/config.py new file mode 100644 index 0000000..056b60c --- /dev/null +++ b/bench/commands/config.py @@ -0,0 +1,99 @@ +# imports - module imports +from 
# imports - module imports
from bench.config.common_site_config import update_config, put_config

# imports - third party imports
import click


@click.group(help="Change bench configuration")
def config():
	"""Parent group for `bench config` subcommands."""
	pass


@click.command(
	"restart_supervisor_on_update",
	help="Enable/Disable auto restart of supervisor processes",
)
@click.argument("state", type=click.Choice(["on", "off"]))
def config_restart_supervisor_on_update(state):
	update_config({"restart_supervisor_on_update": state == "on"})


@click.command(
	"restart_systemd_on_update", help="Enable/Disable auto restart of systemd units"
)
@click.argument("state", type=click.Choice(["on", "off"]))
def config_restart_systemd_on_update(state):
	update_config({"restart_systemd_on_update": state == "on"})


@click.command(
	"dns_multitenant", help="Enable/Disable bench multitenancy on running bench update"
)
@click.argument("state", type=click.Choice(["on", "off"]))
def config_dns_multitenant(state):
	update_config({"dns_multitenant": state == "on"})


@click.command(
	"serve_default_site", help="Configure nginx to serve the default site on port 80"
)
@click.argument("state", type=click.Choice(["on", "off"]))
def config_serve_default_site(state):
	update_config({"serve_default_site": state == "on"})


@click.command("rebase_on_pull", help="Rebase repositories on pulling")
@click.argument("state", type=click.Choice(["on", "off"]))
def config_rebase_on_pull(state):
	update_config({"rebase_on_pull": state == "on"})


@click.command("http_timeout", help="Set HTTP timeout")
@click.argument("seconds", type=int)
def config_http_timeout(seconds):
	update_config({"http_timeout": seconds})


@click.command("set-common-config", help="Set value in common config")
@click.option("configs", "-c", "--config", multiple=True, type=(str, str))
def set_common_config(configs):
	"""Set one or more key/value pairs in the bench's common_site_config.json.

	Values are interpreted as Python literals where possible ("true"/"false"
	are normalized first); anything unparseable is stored as a plain string.
	"""
	import ast

	common_site_config = {}
	for key, value in configs:
		if value in ("true", "false"):
			value = value.title()  # "true" -> "True" so literal_eval yields a bool
		try:
			value = ast.literal_eval(value)
		# Fix: ast.literal_eval raises SyntaxError (not only ValueError) on
		# malformed input such as "foo bar"; both should fall back to the
		# raw string instead of crashing the command.
		except (ValueError, SyntaxError):
			pass

		common_site_config[key] = value

	update_config(common_site_config, bench_path=".")
@click.command(
	"remove-common-config", help="Remove specific keys from current bench's common config"
)
@click.argument("keys", nargs=-1)
def remove_common_config(keys):
	"""Delete the given keys from common_site_config.json (missing keys ignored)."""
	from bench.bench import Bench

	common_site_config = Bench(".").conf
	for key in keys:
		if key in common_site_config:
			del common_site_config[key]

	put_config(common_site_config)


config.add_command(config_restart_supervisor_on_update)
config.add_command(config_restart_systemd_on_update)
config.add_command(config_dns_multitenant)
config.add_command(config_rebase_on_pull)
config.add_command(config_serve_default_site)
config.add_command(config_http_timeout)
config.add_command(set_common_config)
config.add_command(remove_common_config)


# ---- bench/commands/git.py ----

# imports - standard imports
import os
import subprocess

# imports - module imports
from bench.bench import Bench
from bench.app import get_repo_dir
from bench.utils import set_git_remote_url
from bench.utils.app import get_remote

# imports - third party imports
import click


@click.command("remote-set-url", help="Set app remote url")
@click.argument("git-url")
def remote_set_url(git_url):
	set_git_remote_url(git_url)


@click.command("remote-reset-url", help="Reset app remote url to xhiveframework official")
@click.argument("app")
def remote_reset_url(app):
	git_url = f"https://lab.membtech.com/xhiveframework/{app}.git"
	set_git_remote_url(git_url)


@click.command("remote-urls", help="Show apps remote url")
def remote_urls():
	"""Print "app<TAB>remote-url" for every app repo in the current bench."""
	for app in Bench(".").apps:
		repo_dir = get_repo_dir(app)

		if os.path.exists(os.path.join(repo_dir, ".git")):
			remote = get_remote(app)
			# Fix: check_output returns bytes; decode before formatting so the
			# output is "app<TAB>url" rather than "app<TAB>b'url'".
			remote_url = (
				subprocess.check_output(
					["git", "config", "--get", f"remote.{remote}.url"], cwd=repo_dir
				)
				.decode()
				.strip()
			)
			print(f"{app}\t{remote_url}")


# ---- bench/commands/install.py ----

# imports - module imports
from bench.utils import run_playbook
from bench.utils.system import setup_sudoers

# imports - third party imports
import click


# Shared ansible extra-vars; individual install commands update this in place.
extra_vars = {"production": True}


@click.group(help="Install system dependencies for setting up Xhiveframework environment")
def install():
	pass


@click.command(
	"prerequisites",
	help="Installs pre-requisite libraries, essential tools like b2zip, htop, screen, vim, x11-fonts, python libs, cups and Redis",
)
def install_prerequisites():
	run_playbook("site.yml", tag="common, redis")


@click.command(
	"mariadb", help="Install and setup MariaDB of specified version and root password"
)
@click.option(
	"--mysql_root_password",
	"--mysql-root-password",
	"--mariadb_root_password",
	"--mariadb-root-password",
	default="",
)
@click.option("--version", default="10.3")
def install_mariadb(mysql_root_password, version):
	if mysql_root_password:
		extra_vars.update({"mysql_root_password": mysql_root_password})

	extra_vars.update({"mariadb_version": version})

	run_playbook("site.yml", extra_vars=extra_vars, tag="mariadb")


@click.command("wkhtmltopdf", help="Installs wkhtmltopdf v0.12.3 for linux")
def install_wkhtmltopdf():
	run_playbook("site.yml", extra_vars=extra_vars, tag="wkhtmltopdf")


@click.command("nodejs", help="Installs Node.js v8")
def install_nodejs():
	run_playbook("site.yml", extra_vars=extra_vars, tag="nodejs")


@click.command("psutil", help="Installs psutil via pip")
def install_psutil():
	run_playbook("site.yml", extra_vars=extra_vars, tag="psutil")
@click.command(
	"supervisor",
	help="Installs supervisor. If user is specified, sudoers is setup for that user",
)
@click.option("--user")
def install_supervisor(user=None):
	"""Install supervisor via ansible; optionally grant sudoers to `user`."""
	run_playbook("site.yml", extra_vars=extra_vars, tag="supervisor")
	if user:
		setup_sudoers(user)


@click.command(
	"nginx", help="Installs NGINX. If user is specified, sudoers is setup for that user"
)
@click.option("--user")
def install_nginx(user=None):
	"""Install NGINX via ansible; optionally grant sudoers to `user`."""
	run_playbook("site.yml", extra_vars=extra_vars, tag="nginx")
	if user:
		setup_sudoers(user)


@click.command("virtualbox", help="Installs virtualbox")
def install_virtualbox():
	run_playbook("vm_build.yml", tag="virtualbox")


@click.command("packer", help="Installs Oracle virtualbox and packer 1.2.1")
def install_packer():
	run_playbook("vm_build.yml", tag="packer")


@click.command(
	"fail2ban",
	help="Install fail2ban, an intrusion prevention software framework that protects computer servers from brute-force attacks",
)
@click.option(
	"--maxretry",
	default=6,
	help="Number of matches (i.e. value of the counter) which triggers ban action on the IP.",
)
# Fix: the --bantime and --findtime help texts were swapped. Per fail2ban's
# semantics (and the matching `bench setup fail2ban` command), bantime is the
# ban duration and findtime is the counting window.
@click.option(
	"--bantime",
	default=600,
	help='Duration (in seconds) for IP to be banned for. Negative number for "permanent" ban.',
)
@click.option(
	"--findtime",
	default=600,
	help="The counter is set to zero if no match is found within 'findtime' seconds.",
)
def install_failtoban(**kwargs):
	"""Run the fail2ban ansible role with the given jail parameters."""
	extra_vars.update(kwargs)
	run_playbook("site.yml", extra_vars=extra_vars, tag="fail2ban")


install.add_command(install_prerequisites)
install.add_command(install_mariadb)
install.add_command(install_wkhtmltopdf)
install.add_command(install_nodejs)
install.add_command(install_psutil)
install.add_command(install_supervisor)
install.add_command(install_nginx)
install.add_command(install_failtoban)
install.add_command(install_virtualbox)
install.add_command(install_packer)
+) +@click.option( + "--apps_path", default=None, help="path to json files with apps to install after init" +) +@click.option("--xhiveframework-path", default=None, help="path to xhiveframework repo") +@click.option("--clone-from", default=None, help="copy repos from path") +@click.option( + "--clone-without-update", is_flag=True, help="copy repos from path without update" +) +@click.option("--no-procfile", is_flag=True, help="Do not create a Procfile") +@click.option( + "--no-backups", + is_flag=True, + help="Do not set up automatic periodic backups for all sites on this bench", +) +@click.option( + "--skip-redis-config-generation", + is_flag=True, + help="Skip redis config generation if already specifying the common-site-config file", +) +@click.option("--skip-assets", is_flag=True, default=False, help="Do not build assets") +@click.option("--install-app", help="Install particular app after initialization") +@click.option("--verbose", is_flag=True, help="Verbose output during install") +@click.option( + "--dev", + is_flag=True, + default=False, + help="Enable developer mode and install development dependencies.", +) +def init( + path, + apps_path, + xhiveframework_path, + xhiveframework_branch, + no_procfile, + no_backups, + clone_from, + verbose, + skip_redis_config_generation, + clone_without_update, + ignore_exist=False, + skip_assets=False, + python="python3", + install_app=None, + dev=False, +): + import os + + from bench.utils import log + from bench.utils.system import init + + if not ignore_exist and os.path.exists(path): + log(f"Bench instance already exists at {path}", level=2) + return + + try: + init( + path, + apps_path=apps_path, # can be used from --config flag? Maybe config file could have more info? 
@click.command("drop")
@click.argument("path")
def drop(path):
	"""Delete an existing, empty bench located at `path`.

	Refuses to touch anything that is not a bench or that still hosts sites.
	"""
	from bench.bench import Bench
	from bench.exceptions import BenchNotFoundError, ValidationError

	target = Bench(path)

	if not target.exists:
		raise BenchNotFoundError(f"Bench {target.name} does not exist")

	if target.sites:
		raise ValidationError("Cannot remove non-empty bench directory")

	target.drop()
	print("Bench dropped")
+) +@click.option( + "--resolve-deps", + is_flag=True, + default=False, + help="Resolve dependencies before installing app", +) +@click.option( + "--cache-key", + type=str, + default=None, + help="Caches get-app artifacts if provided (only first 10 chars is used)", +) +@click.option( + "--compress-artifacts", + is_flag=True, + default=False, + help="Whether to gzip get-app artifacts that are to be cached", +) +def get_app( + git_url, + branch, + name=None, + overwrite=False, + skip_assets=False, + soft_link=False, + init_bench=False, + resolve_deps=False, + cache_key=None, + compress_artifacts=False, +): + "clone an app from the internet and set it up in your bench" + from bench.app import get_app + + get_app( + git_url, + branch=branch, + skip_assets=skip_assets, + overwrite=overwrite, + soft_link=soft_link, + init_bench=init_bench, + resolve_deps=resolve_deps, + cache_key=cache_key, + compress_artifacts=compress_artifacts, + ) + + +@click.command("new-app", help="Create a new Xhiveframework application under apps folder") +@click.option( + "--no-git", + is_flag=True, + flag_value="--no-git", + help="Do not initialize git repository for the app (available in Xhiveframework v14+)", +) +@click.argument("app-name") +def new_app(app_name, no_git=None): + from bench.app import new_app + + new_app(app_name, no_git) + + +@click.command( + ["remove", "rm", "remove-app"], + help=( + "Completely remove app from bench and re-build assets if not installed on any site" + ), +) +@click.option("--no-backup", is_flag=True, help="Do not backup app before removing") +@click.option("--force", is_flag=True, help="Force remove app") +@click.argument("app-name") +def remove_app(app_name, no_backup=False, force=False): + from bench.bench import Bench + + bench = Bench(".") + bench.uninstall(app_name, no_backup=no_backup, force=force) + + +@click.command("exclude-app", help="Exclude app from updating") +@click.argument("app_name") +def exclude_app_for_update(app_name): + from bench.app 
@click.command("include-app", help="Include app for updating")
@click.argument("app_name")
def include_app_for_update(app_name):
	"""Remove `app_name` from excluded_apps.txt so it is updated again."""
	from bench.app import remove_from_excluded_apps_txt

	remove_from_excluded_apps_txt(app_name)


@click.command(
	"pip",
	context_settings={"ignore_unknown_options": True, "help_option_names": []},
	help="For pip help use `bench pip help [COMMAND]` or `bench pip [COMMAND] -h`",
)
@click.argument("args", nargs=-1)
@click.pass_context
def pip(ctx, args):
	"""Run pip commands inside the bench's virtualenv (replaces this process)."""
	import os

	from bench.utils.bench import get_env_cmd

	env_py = get_env_cmd("python")
	os.execv(env_py, (env_py, "-m", "pip") + args)


@click.command(
	"validate-dependencies",
	# Fix: typo in user-facing help text ("curently" -> "currently").
	help="Validates that all requirements specified in xhiveframework-dependencies are met currently.",
)
@click.pass_context
def validate_dependencies(ctx):
	"""Validate all specified xhiveframework-dependencies, raising on failure."""
	from bench.bench import Bench
	from bench.app import App

	bench = Bench(".")

	for app_name in bench.apps:
		app = App(app_name, bench=bench)
		app.validate_app_dependencies(throw=True)


# ---- bench/commands/setup.py ----

# imports - standard imports
import os
import sys

# imports - third party imports
import click

# imports - module imports
from bench.utils import exec_cmd, run_playbook, which
from bench.utils.cli import SugaredOption


@click.group(help="Setup command group for enabling setting up a Xhiveframework environment")
def setup():
	pass


@click.command(
	"sudoers", help="Add commands to sudoers list for execution without password"
)
@click.argument("user")
def setup_sudoers(user):
	from bench.utils.system import setup_sudoers

	setup_sudoers(user)
Use none or '' to not set a value.", + only_if_set=["logging"], + cls=SugaredOption, + default="main", +) +@click.option( + "--yes", help="Yes to regeneration of nginx config file", default=False, is_flag=True +) +def setup_nginx(yes=False, logging="combined", log_format=None): + from bench.config.nginx import make_nginx_conf + + make_nginx_conf(bench_path=".", yes=yes, logging=logging, log_format=log_format) + + +@click.command("reload-nginx", help="Checks NGINX config file and reloads service") +def reload_nginx(): + from bench.config.production_setup import reload_nginx + + reload_nginx() + + +@click.command("supervisor", help="Generate configuration for supervisor") +@click.option("--user", help="optional user argument") +@click.option( + "--yes", help="Yes to regeneration of supervisor config", is_flag=True, default=False +) +@click.option( + "--skip-redis", help="Skip redis configuration", is_flag=True, default=False +) +@click.option( + "--skip-supervisord", + help="Skip supervisord configuration", + is_flag=True, + default=False, +) +def setup_supervisor(user=None, yes=False, skip_redis=False, skip_supervisord=False): + from bench.utils import get_cmd_output + from bench.config.supervisor import ( + check_supervisord_config, + generate_supervisor_config, + ) + + if which("supervisorctl") is None: + click.secho("Please install `supervisor` to proceed", fg="red") + sys.exit(1) + + if not skip_supervisord and "Permission denied" in get_cmd_output( + "supervisorctl status" + ): + check_supervisord_config(user=user) + + generate_supervisor_config(bench_path=".", user=user, yes=yes, skip_redis=skip_redis) + + +@click.command("redis", help="Generates configuration for Redis") +def setup_redis(): + from bench.config.redis import generate_config + + generate_config(".") + + +@click.command("fonts", help="Add Xhiveframework fonts to system") +def setup_fonts(): + from bench.utils.system import setup_fonts + + setup_fonts() + + +@click.command( + "production", 
@click.command("backups", help="Add cronjob for bench backups")
def setup_backups():
	"""Register the periodic backup cronjob for all sites on this bench."""
	from bench.bench import Bench

	Bench(".").setup.backups()


@click.command("env", help="Setup Python environment for bench")
@click.option(
	"--python", type=str, default="python3", help="Path to Python Executable."
)
def setup_env(python="python3"):
	"""(Re)create the bench virtualenv using the given Python interpreter."""
	from bench.bench import Bench

	return Bench(".").setup.env(python=python)


@click.command("firewall", help="Setup firewall for system")
@click.option("--ssh_port")
@click.option("--force")
def setup_firewall(ssh_port=None, force=False):
	"""Configure the firewall to block all ports except 80, 443 and SSH."""
	if not force:
		click.confirm(
			f"Setting up the firewall will block all ports except 80, 443 and {ssh_port}\nDo you want to continue?",
			abort=True,
		)

	ssh_port = ssh_port or 22  # default to the standard SSH port

	run_playbook("roles/bench/tasks/setup_firewall.yml", {"ssh_port": ssh_port})


@click.command("ssh-port", help="Set SSH Port for system")
@click.argument("port")
@click.option("--force")
def set_ssh_port(port, force=False):
	"""Change the system SSH port via the ansible playbook."""
	if not force:
		click.confirm(
			f"This will change your SSH Port to {port}\nDo you want to continue?", abort=True
		)

	run_playbook("roles/bench/tasks/change_ssh_port.yml", {"ssh_port": port})
Shouldn't be used on 1'st attempt", +) +def setup_letsencrypt(site, custom_domain, non_interactive): + from bench.config.lets_encrypt import setup_letsencrypt + + setup_letsencrypt(site, custom_domain, bench_path=".", interactive=not non_interactive) + + +@click.command( + "wildcard-ssl", help="Setup wildcard SSL certificate for multi-tenant bench" +) +@click.argument("domain") +@click.option("--email") +@click.option( + "--exclude-base-domain", + default=False, + is_flag=True, + help="SSL Certificate not applicable for base domain", +) +def setup_wildcard_ssl(domain, email, exclude_base_domain): + from bench.config.lets_encrypt import setup_wildcard_ssl + + setup_wildcard_ssl( + domain, email, bench_path=".", exclude_base_domain=exclude_base_domain + ) + + +@click.command("procfile", help="Generate Procfile for bench start") +def setup_procfile(): + from bench.config.procfile import setup_procfile + + setup_procfile(".") + + +@click.command( + "socketio", help="[DEPRECATED] Setup node dependencies for socketio server" +) +def setup_socketio(): + return + + +@click.command("requirements") +@click.option("--node", help="Update only Node packages", default=False, is_flag=True) +@click.option( + "--python", help="Update only Python packages", default=False, is_flag=True +) +@click.option( + "--dev", + help="Install optional python development dependencies", + default=False, + is_flag=True, +) +@click.argument("apps", nargs=-1) +def setup_requirements(node=False, python=False, dev=False, apps=None): + """ + Setup Python and Node dependencies. + + You can optionally specify one or more apps to setup dependencies for. 
+ """ + from bench.bench import Bench + + bench = Bench(".") + + if not (node or python or dev): + bench.setup.requirements(apps=apps) + + elif not node and not dev: + bench.setup.python(apps=apps) + + elif not python and not dev: + bench.setup.node(apps=apps) + + else: + from bench.utils.bench import install_python_dev_dependencies + + install_python_dev_dependencies(apps=apps) + + if node: + click.secho( + "--dev flag only supports python dependencies. All node development dependencies are installed by default.", + fg="yellow", + ) + + +@click.command( + "manager", + help="Setup bench-manager.local site with the bench_manager app installed on it", +) +@click.option( + "--yes", help="Yes to regeneration of nginx config file", default=False, is_flag=True +) +@click.option( + "--port", help="Port on which you want to run bench manager", default=23624 +) +@click.option("--domain", help="Domain on which you want to run bench manager") +def setup_manager(yes=False, port=23624, domain=None): + from bench.bench import Bench + from bench.config.nginx import make_bench_manager_nginx_conf + + create_new_site = True + + if "bench-manager.local" in os.listdir("sites"): + create_new_site = click.confirm("Site already exists. Overwrite existing site?") + + if create_new_site: + exec_cmd("bench new-site --force bench-manager.local") + + if "bench_manager" in os.listdir("apps"): + print("App already exists. Skipping app download.") + else: + exec_cmd("bench get-app bench_manager") + + exec_cmd("bench --site bench-manager.local install-app bench_manager") + + bench_path = "." 
+ bench = Bench(bench_path) + + if bench.conf.get("restart_supervisor_on_update") or bench.conf.get( + "restart_systemd_on_update" + ): + # implicates a production setup or so I presume + if not domain: + print( + "Please specify the site name on which you want to host bench-manager using the 'domain' flag" + ) + sys.exit(1) + + if domain not in bench.sites: + raise Exception("No such site") + + make_bench_manager_nginx_conf(bench_path, yes=yes, port=port, domain=domain) + + +@click.command("config", help="Generate or over-write sites/common_site_config.json") +def setup_config(): + from bench.config.common_site_config import setup_config + + setup_config(".") + + +@click.command("add-domain", help="Add a custom domain to a particular site") +@click.argument("domain") +@click.option("--site", prompt=True) +@click.option("--ssl-certificate", help="Absolute path to SSL Certificate") +@click.option("--ssl-certificate-key", help="Absolute path to SSL Certificate Key") +def add_domain(domain, site=None, ssl_certificate=None, ssl_certificate_key=None): + """Add custom domain to site""" + if not site: + print("Please specify site") + sys.exit(1) + + from bench.config.site_config import add_domain + + add_domain(site, domain, ssl_certificate, ssl_certificate_key, bench_path=".") + + +@click.command("remove-domain", help="Remove custom domain from a site") +@click.argument("domain") +@click.option("--site", prompt=True) +def remove_domain(domain, site=None): + if not site: + print("Please specify site") + sys.exit(1) + + from bench.config.site_config import remove_domain + + remove_domain(site, domain, bench_path=".") + + +@click.command( + "sync-domains", + help="Check if there is a change in domains. 
If yes, updates the domains list.", +) +@click.option("--domain", multiple=True) +@click.option("--site", prompt=True) +def sync_domains(domain=None, site=None): + if not site: + print("Please specify site") + sys.exit(1) + + try: + domains = list(map(str, domain)) + except Exception: + print("Domains should be a json list of strings or dictionaries") + sys.exit(1) + + from bench.config.site_config import sync_domains + + changed = sync_domains(site, domains, bench_path=".") + + # if changed, success, else failure + sys.exit(0 if changed else 1) + + +@click.command("role", help="Install dependencies via ansible roles") +@click.argument("role") +@click.option("--admin_emails", default="") +@click.option("--mysql_root_password", "--mariadb_root_password") +@click.option("--container", is_flag=True, default=False) +def setup_roles(role, **kwargs): + extra_vars = {"production": True} + extra_vars.update(kwargs) + + if role: + run_playbook("site.yml", extra_vars=extra_vars, tag=role) + else: + run_playbook("site.yml", extra_vars=extra_vars) + + +@click.command( + "fail2ban", + help="Setup fail2ban, an intrusion prevention software framework that protects computer servers from brute-force attacks", +) +@click.option( + "--maxretry", + default=6, + help="Number of matches (i.e. value of the counter) which triggers ban action on the IP. Default is 6 seconds", +) +@click.option( + "--bantime", + default=600, + help="Duration (in seconds) for IP to be banned for. Negative number for 'permanent' ban. Default is 600 seconds", +) +@click.option( + "--findtime", + default=600, + help="The counter is set to zero if match found within 'findtime' seconds doesn't exceed 'maxretry'. 
Default is 600 seconds", +) +def setup_nginx_proxy_jail(**kwargs): + run_playbook("roles/fail2ban/tasks/configure_nginx_jail.yml", extra_vars=kwargs) + + +@click.command("systemd", help="Generate configuration for systemd") +@click.option("--user", help="Optional user argument") +@click.option( + "--yes", + help="Yes to regeneration of systemd config files", + is_flag=True, + default=False, +) +@click.option("--stop", help="Stop bench services", is_flag=True, default=False) +@click.option("--create-symlinks", help="Create Symlinks", is_flag=True, default=False) +@click.option("--delete-symlinks", help="Delete Symlinks", is_flag=True, default=False) +def setup_systemd( + user=None, yes=False, stop=False, create_symlinks=False, delete_symlinks=False +): + from bench.config.systemd import generate_systemd_config + + generate_systemd_config( + bench_path=".", + user=user, + yes=yes, + stop=stop, + create_symlinks=create_symlinks, + delete_symlinks=delete_symlinks, + ) + + +setup.add_command(setup_sudoers) +setup.add_command(setup_nginx) +setup.add_command(reload_nginx) +setup.add_command(setup_supervisor) +setup.add_command(setup_redis) +setup.add_command(setup_letsencrypt) +setup.add_command(setup_wildcard_ssl) +setup.add_command(setup_production) +setup.add_command(setup_backups) +setup.add_command(setup_env) +setup.add_command(setup_procfile) +setup.add_command(setup_socketio) +setup.add_command(setup_requirements) +setup.add_command(setup_manager) +setup.add_command(setup_config) +setup.add_command(setup_fonts) +setup.add_command(add_domain) +setup.add_command(remove_domain) +setup.add_command(sync_domains) +setup.add_command(setup_firewall) +setup.add_command(set_ssh_port) +setup.add_command(setup_roles) +setup.add_command(setup_nginx_proxy_jail) +setup.add_command(setup_systemd) diff --git a/bench/commands/update.py b/bench/commands/update.py new file mode 100755 index 0000000..a7a6cce --- /dev/null +++ b/bench/commands/update.py @@ -0,0 +1,101 @@ +# imports - 
third party imports +import click + +# imports - module imports +from bench.app import pull_apps +from bench.utils.bench import post_upgrade, patch_sites, build_assets + + +@click.command( + "update", + help="Performs an update operation on current bench. Without any flags will backup, pull, setup requirements, build, run patches and restart bench. Using specific flags will only do certain tasks instead of all", +) +@click.option("--pull", is_flag=True, help="Pull updates for all the apps in bench") +@click.option("--apps", type=str) +@click.option("--patch", is_flag=True, help="Run migrations for all sites in the bench") +@click.option("--build", is_flag=True, help="Build JS and CSS assets for the bench") +@click.option( + "--requirements", + is_flag=True, + help="Update requirements. If run alone, equivalent to `bench setup requirements`", +) +@click.option( + "--restart-supervisor", is_flag=True, help="Restart supervisor processes after update" +) +@click.option( + "--restart-systemd", is_flag=True, help="Restart systemd units after update" +) +@click.option( + "--no-backup", + is_flag=True, + help="If this flag is set, sites won't be backed up prior to updates. 
Note: This is not recommended in production.", +) +@click.option( + "--no-compile", + is_flag=True, + help="[DEPRECATED] This flag doesn't do anything now.", +) +@click.option("--force", is_flag=True, help="Forces major version upgrades") +@click.option( + "--reset", + is_flag=True, + help="Hard resets git branch's to their new states overriding any changes and overriding rebase on pull", +) +def update( + pull, + apps, + patch, + build, + requirements, + restart_supervisor, + restart_systemd, + no_backup, + no_compile, + force, + reset, +): + from bench.utils.bench import update + + update( + pull=pull, + apps=apps, + patch=patch, + build=build, + requirements=requirements, + restart_supervisor=restart_supervisor, + restart_systemd=restart_systemd, + backup=not no_backup, + compile=not no_compile, + force=force, + reset=reset, + ) + + +@click.command("retry-upgrade", help="Retry a failed upgrade") +@click.option("--version", default=5) +def retry_upgrade(version): + pull_apps() + patch_sites() + build_assets() + post_upgrade(version - 1, version) + + +@click.command( + "switch-to-branch", + help="Switch all apps to specified branch, or specify apps separated by space", +) +@click.argument("branch") +@click.argument("apps", nargs=-1) +@click.option("--upgrade", is_flag=True) +def switch_to_branch(branch, apps, upgrade=False): + from bench.utils.app import switch_to_branch + + switch_to_branch(branch=branch, apps=list(apps), upgrade=upgrade) + + +@click.command("switch-to-develop") +def switch_to_develop(upgrade=False): + "Switch xhiveframework and xhiveerp to develop branch" + from bench.utils.app import switch_to_develop + + switch_to_develop(apps=["xhiveframework", "xhiveerp"]) diff --git a/bench/commands/utils.py b/bench/commands/utils.py new file mode 100644 index 0000000..ce826e0 --- /dev/null +++ b/bench/commands/utils.py @@ -0,0 +1,196 @@ +# imports - standard imports +import os + +# imports - third party imports +import click + + +@click.command("start", 
help="Start Xhiveframework development processes") +@click.option("--no-dev", is_flag=True, default=False) +@click.option( + "--no-prefix", + is_flag=True, + default=False, + help="Hide process name from bench start log", +) +@click.option("--concurrency", "-c", type=str) +@click.option("--procfile", "-p", type=str) +@click.option("--man", "-m", help="Process Manager of your choice ;)") +def start(no_dev, concurrency, procfile, no_prefix, man): + from bench.utils.system import start + + start( + no_dev=no_dev, + concurrency=concurrency, + procfile=procfile, + no_prefix=no_prefix, + procman=man, + ) + + +@click.command("restart", help="Restart supervisor processes or systemd units") +@click.option("--web", is_flag=True, default=False) +@click.option("--supervisor", is_flag=True, default=False) +@click.option("--systemd", is_flag=True, default=False) +def restart(web, supervisor, systemd): + from bench.bench import Bench + + if not systemd and not web: + supervisor = True + + Bench(".").reload(web, supervisor, systemd) + + +@click.command("set-nginx-port", help="Set NGINX port for site") +@click.argument("site") +@click.argument("port", type=int) +def set_nginx_port(site, port): + from bench.config.site_config import set_nginx_port + + set_nginx_port(site, port) + + +@click.command("set-ssl-certificate", help="Set SSL certificate path for site") +@click.argument("site") +@click.argument("ssl-certificate-path") +def set_ssl_certificate(site, ssl_certificate_path): + from bench.config.site_config import set_ssl_certificate + + set_ssl_certificate(site, ssl_certificate_path) + + +@click.command("set-ssl-key", help="Set SSL certificate private key path for site") +@click.argument("site") +@click.argument("ssl-certificate-key-path") +def set_ssl_certificate_key(site, ssl_certificate_key_path): + from bench.config.site_config import set_ssl_certificate_key + + set_ssl_certificate_key(site, ssl_certificate_key_path) + + +@click.command("set-url-root", help="Set URL root for 
site") +@click.argument("site") +@click.argument("url-root") +def set_url_root(site, url_root): + from bench.config.site_config import set_url_root + + set_url_root(site, url_root) + + +@click.command("set-mariadb-host", help="Set MariaDB host for bench") +@click.argument("host") +def set_mariadb_host(host): + from bench.utils.bench import set_mariadb_host + + set_mariadb_host(host) + + +@click.command("set-redis-cache-host", help="Set Redis cache host for bench") +@click.argument("host") +def set_redis_cache_host(host): + """ + Usage: bench set-redis-cache-host localhost:6379/1 + """ + from bench.utils.bench import set_redis_cache_host + + set_redis_cache_host(host) + + +@click.command("set-redis-queue-host", help="Set Redis queue host for bench") +@click.argument("host") +def set_redis_queue_host(host): + """ + Usage: bench set-redis-queue-host localhost:6379/2 + """ + from bench.utils.bench import set_redis_queue_host + + set_redis_queue_host(host) + + +@click.command("set-redis-socketio-host", help="Set Redis socketio host for bench") +@click.argument("host") +def set_redis_socketio_host(host): + """ + Usage: bench set-redis-socketio-host localhost:6379/3 + """ + from bench.utils.bench import set_redis_socketio_host + + set_redis_socketio_host(host) + + +@click.command("download-translations", help="Download latest translations") +def download_translations(): + from bench.utils.translation import download_translations_p + + download_translations_p() + + +@click.command( + "renew-lets-encrypt", help="Sets Up latest cron and Renew Let's Encrypt certificate" +) +def renew_lets_encrypt(): + from bench.config.lets_encrypt import renew_certs + + renew_certs() + + +@click.command("backup-all-sites", help="Backup all sites in current bench") +def backup_all_sites(): + from bench.utils.system import backup_all_sites + + backup_all_sites(bench_path=".") + + +@click.command( + "disable-production", help="Disables production environment for the bench." 
+) +def disable_production(): + from bench.config.production_setup import disable_production + + disable_production(bench_path=".") + + +@click.command( + "src", help="Prints bench source folder path, which can be used as: cd `bench src`" +) +def bench_src(): + from bench.cli import src + + print(os.path.dirname(src)) + + +@click.command("find", help="Finds benches recursively from location") +@click.argument("location", default="") +def find_benches(location): + from bench.utils import find_benches + + find_benches(directory=location) + + +@click.command( + "migrate-env", help="Migrate Virtual Environment to desired Python Version" +) +@click.argument("python", type=str) +@click.option("--no-backup", "backup", is_flag=True, default=True) +def migrate_env(python, backup=True): + from bench.utils.bench import migrate_env + + migrate_env(python=python, backup=backup) + + +@click.command("app-cache", help="View or remove items belonging to bench get-app cache") +@click.option("--clear", is_flag=True, default=False, help="Remove all items") +@click.option( + "--remove-app", + default="", + help="Removes all items that match provided app name", +) +@click.option( + "--remove-key", + default="", + help="Removes all items that matches provided cache key", +) +def app_cache_helper(clear=False, remove_app="", remove_key=""): + from bench.utils.bench import cache_helper + + cache_helper(clear, remove_app, remove_key) diff --git a/bench/config/__init__.py b/bench/config/__init__.py new file mode 100644 index 0000000..64fdcac --- /dev/null +++ b/bench/config/__init__.py @@ -0,0 +1,7 @@ +"""Module for setting up system and respective bench configurations""" + + +def env(): + from jinja2 import Environment, PackageLoader + + return Environment(loader=PackageLoader("bench.config")) diff --git a/bench/config/common_site_config.py b/bench/config/common_site_config.py new file mode 100644 index 0000000..23a29f6 --- /dev/null +++ b/bench/config/common_site_config.py @@ -0,0 +1,144 @@ 
+# imports - standard imports +import getpass +import json +import os + +default_config = { + "restart_supervisor_on_update": False, + "restart_systemd_on_update": False, + "serve_default_site": True, + "rebase_on_pull": False, + "xhiveframework_user": getpass.getuser(), + "shallow_clone": True, + "background_workers": 1, + "use_redis_auth": False, + "live_reload": True, +} + +DEFAULT_MAX_REQUESTS = 5000 + + +def setup_config(bench_path, additional_config=None): + make_pid_folder(bench_path) + bench_config = get_config(bench_path) + bench_config.update(default_config) + bench_config.update(get_gunicorn_workers()) + update_config_for_xhiveframework(bench_config, bench_path) + if additional_config: + bench_config.update(additional_config) + + put_config(bench_config, bench_path) + + +def get_config(bench_path): + return get_common_site_config(bench_path) + + +def get_common_site_config(bench_path): + config_path = get_config_path(bench_path) + if not os.path.exists(config_path): + return {} + with open(config_path) as f: + return json.load(f) + + +def put_config(config, bench_path="."): + config_path = get_config_path(bench_path) + with open(config_path, "w") as f: + return json.dump(config, f, indent=1, sort_keys=True) + + +def update_config(new_config, bench_path="."): + config = get_config(bench_path=bench_path) + config.update(new_config) + put_config(config, bench_path=bench_path) + + +def get_config_path(bench_path): + return os.path.join(bench_path, "sites", "common_site_config.json") + + +def get_gunicorn_workers(): + """This function will return the maximum workers that can be started depending upon + number of cpu's present on the machine""" + import multiprocessing + + return {"gunicorn_workers": multiprocessing.cpu_count() * 2 + 1} + + +def compute_max_requests_jitter(max_requests: int) -> int: + return int(max_requests * 0.1) + + +def get_default_max_requests(worker_count: int): + """Get max requests and jitter config based on number of available 
workers.""" + + if worker_count <= 1: + # If there's only one worker then random restart can cause spikes in response times and + # can be annoying. Hence not enabled by default. + return 0 + return DEFAULT_MAX_REQUESTS + + +def update_config_for_xhiveframework(config, bench_path): + ports = make_ports(bench_path) + + for key in ("redis_cache", "redis_queue", "redis_socketio"): + if key not in config: + config[key] = f"redis://127.0.0.1:{ports[key]}" + + for key in ("webserver_port", "socketio_port", "file_watcher_port"): + if key not in config: + config[key] = ports[key] + + +def make_ports(bench_path): + from urllib.parse import urlparse + + benches_path = os.path.dirname(os.path.abspath(bench_path)) + + default_ports = { + "webserver_port": 8000, + "socketio_port": 9000, + "file_watcher_port": 6787, + "redis_queue": 11000, + "redis_socketio": 13000, + "redis_cache": 13000, + } + + # collect all existing ports + existing_ports = {} + for folder in os.listdir(benches_path): + bench_path = os.path.join(benches_path, folder) + if os.path.isdir(bench_path): + bench_config = get_config(bench_path) + for key in list(default_ports.keys()): + value = bench_config.get(key) + + # extract port from redis url + if value and (key in ("redis_cache", "redis_queue", "redis_socketio")): + value = urlparse(value).port + + if value: + existing_ports.setdefault(key, []).append(value) + + # new port value = max of existing port value + 1 + ports = {} + for key, value in list(default_ports.items()): + existing_value = existing_ports.get(key, []) + if existing_value: + value = max(existing_value) + 1 + + ports[key] = value + + # Backward compatbility: always keep redis_cache and redis_socketio port same + # Note: not required from v15 + ports["redis_socketio"] = ports["redis_cache"] + + return ports + + +def make_pid_folder(bench_path): + pids_path = os.path.join(bench_path, "config", "pids") + if not os.path.exists(pids_path): + os.makedirs(pids_path) diff --git 
a/bench/config/lets_encrypt.py b/bench/config/lets_encrypt.py new file mode 100755 index 0000000..c1c7298 --- /dev/null +++ b/bench/config/lets_encrypt.py @@ -0,0 +1,196 @@ +# imports - standard imports +import os + +# imports - third party imports +import click + +# imports - module imports +import bench +from bench.config.nginx import make_nginx_conf +from bench.config.production_setup import service +from bench.config.site_config import get_domains, remove_domain, update_site_config +from bench.bench import Bench +from bench.utils import exec_cmd, which +from bench.utils.bench import update_common_site_config +from bench.exceptions import CommandFailedError + + +def setup_letsencrypt(site, custom_domain, bench_path, interactive): + + site_path = os.path.join(bench_path, "sites", site, "site_config.json") + if not os.path.exists(os.path.dirname(site_path)): + print("No site named " + site) + return + + if custom_domain: + domains = get_domains(site, bench_path) + for d in domains: + if isinstance(d, dict) and d["domain"] == custom_domain: + print(f"SSL for Domain {custom_domain} already exists") + return + + if custom_domain not in domains: + print(f"No custom domain named {custom_domain} set for site") + return + + if interactive: + click.confirm( + "Running this will stop the nginx service temporarily causing your sites to go offline\n" + "Do you want to continue?", + abort=True, + ) + + if not Bench(bench_path).conf.get("dns_multitenant"): + print("You cannot setup SSL without DNS Multitenancy") + return + + create_config(site, custom_domain) + run_certbot_and_setup_ssl(site, custom_domain, bench_path, interactive) + setup_crontab() + + +def create_config(site, custom_domain): + config = ( + bench.config.env() + .get_template("letsencrypt.cfg") + .render(domain=custom_domain or site) + ) + config_path = f"/etc/letsencrypt/configs/{custom_domain or site}.cfg" + create_dir_if_missing(config_path) + + with open(config_path, "w") as f: + f.write(config) + + +def 
run_certbot_and_setup_ssl(site, custom_domain, bench_path, interactive=True): + service("nginx", "stop") + + try: + interactive = "" if interactive else "-n" + exec_cmd( + f"{get_certbot_path()} {interactive} --config /etc/letsencrypt/configs/{custom_domain or site}.cfg certonly" + ) + except CommandFailedError: + service("nginx", "start") + print("There was a problem trying to setup SSL for your site") + return + + ssl_path = f"/etc/letsencrypt/live/{custom_domain or site}/" + ssl_config = { + "ssl_certificate": os.path.join(ssl_path, "fullchain.pem"), + "ssl_certificate_key": os.path.join(ssl_path, "privkey.pem"), + } + + if custom_domain: + remove_domain(site, custom_domain, bench_path) + domains = get_domains(site, bench_path) + ssl_config["domain"] = custom_domain + domains.append(ssl_config) + update_site_config(site, {"domains": domains}, bench_path=bench_path) + else: + update_site_config(site, ssl_config, bench_path=bench_path) + + make_nginx_conf(bench_path) + service("nginx", "start") + + +def setup_crontab(): + from crontab import CronTab + + job_command = ( + f'{get_certbot_path()} renew -a nginx --post-hook "systemctl reload nginx"' + ) + job_comment = "Renew lets-encrypt every month" + print(f"Setting Up cron job to {job_comment}") + + system_crontab = CronTab(user="root") + + for job in system_crontab.find_comment(comment=job_comment): # Removes older entries + system_crontab.remove(job) + + job = system_crontab.new(command=job_command, comment=job_comment) + job.setall("0 0 */1 * *") # Run at 00:00 every day-of-month + system_crontab.write() + + +def create_dir_if_missing(path): + if not os.path.exists(os.path.dirname(path)): + os.makedirs(os.path.dirname(path)) + + +def get_certbot_path(): + try: + return which("certbot", raise_err=True) + except FileNotFoundError: + raise CommandFailedError( + "Certbot is not installed on your system. Please visit https://certbot.eff.org/instructions for installation instructions, then try again." 
+ ) + + +def renew_certs(): + # Needs to be run with sudo + click.confirm( + "Running this will stop the nginx service temporarily causing your sites to go offline\n" + "Do you want to continue?", + abort=True, + ) + + setup_crontab() + + service("nginx", "stop") + exec_cmd(f"{get_certbot_path()} renew") + service("nginx", "start") + + +def setup_wildcard_ssl(domain, email, bench_path, exclude_base_domain): + def _get_domains(domain): + domain_list = [domain] + + if not domain.startswith("*."): + # add wildcard caracter to domain if missing + domain_list.append(f"*.{domain}") + else: + # include base domain based on flag + domain_list.append(domain.replace("*.", "")) + + if exclude_base_domain: + domain_list.remove(domain.replace("*.", "")) + + return domain_list + + if not Bench(bench_path).conf.get("dns_multitenant"): + print("You cannot setup SSL without DNS Multitenancy") + return + + domain_list = _get_domains(domain.strip()) + + email_param = "" + if email: + email_param = f"--email {email}" + + try: + exec_cmd( + f"{get_certbot_path()} certonly --manual --preferred-challenges=dns {email_param} \ + --server https://acme-v02.api.letsencrypt.org/directory \ + --agree-tos -d {' -d '.join(domain_list)}" + ) + + except CommandFailedError: + print("There was a problem trying to setup SSL") + return + + ssl_path = f"/etc/letsencrypt/live/{domain}/" + ssl_config = { + "wildcard": { + "domain": domain, + "ssl_certificate": os.path.join(ssl_path, "fullchain.pem"), + "ssl_certificate_key": os.path.join(ssl_path, "privkey.pem"), + } + } + + update_common_site_config(ssl_config) + setup_crontab() + + make_nginx_conf(bench_path) + print("Restrting Nginx service") + service("nginx", "restart") diff --git a/bench/config/nginx.py b/bench/config/nginx.py new file mode 100644 index 0000000..5bf67f2 --- /dev/null +++ b/bench/config/nginx.py @@ -0,0 +1,302 @@ +# imports - standard imports +import hashlib +import os +import random +import string + +# imports - third party imports 
+import click + +# imports - module imports +import bench +import bench.config +from bench.bench import Bench +from bench.utils import get_bench_name + + +def make_nginx_conf(bench_path, yes=False, logging=None, log_format=None): + conf_path = os.path.join(bench_path, "config", "nginx.conf") + + if not yes and os.path.exists(conf_path): + if not click.confirm( + "nginx.conf already exists and this will overwrite it. Do you want to continue?" + ): + return + + template = bench.config.env().get_template("nginx.conf") + bench_path = os.path.abspath(bench_path) + sites_path = os.path.join(bench_path, "sites") + + config = Bench(bench_path).conf + sites = prepare_sites(config, bench_path) + bench_name = get_bench_name(bench_path) + + allow_rate_limiting = config.get("allow_rate_limiting", False) + + template_vars = { + "sites_path": sites_path, + "http_timeout": config.get("http_timeout"), + "sites": sites, + "webserver_port": config.get("webserver_port"), + "socketio_port": config.get("socketio_port"), + "bench_name": bench_name, + "error_pages": get_error_pages(), + "allow_rate_limiting": allow_rate_limiting, + # for nginx map variable + "random_string": "".join(random.choice(string.ascii_lowercase) for i in range(7)), + } + + if logging and logging != "none": + _log_format = "" + if log_format and log_format != "none": + _log_format = log_format + template_vars["logging"] = {"level": logging, "log_format": _log_format} + + if allow_rate_limiting: + template_vars.update( + { + "bench_name_hash": hashlib.sha256(bench_name).hexdigest()[:16], + "limit_conn_shared_memory": get_limit_conn_shared_memory(), + } + ) + + nginx_conf = template.render(**template_vars) + + with open(conf_path, "w") as f: + f.write(nginx_conf) + + +def make_bench_manager_nginx_conf(bench_path, yes=False, port=23624, domain=None): + from bench.config.site_config import get_site_config + + template = bench.config.env().get_template("bench_manager_nginx.conf") + bench_path = 
os.path.abspath(bench_path) + sites_path = os.path.join(bench_path, "sites") + + config = Bench(bench_path).conf + site_config = get_site_config(domain, bench_path=bench_path) + bench_name = get_bench_name(bench_path) + + template_vars = { + "port": port, + "domain": domain, + "bench_manager_site_name": "bench-manager.local", + "sites_path": sites_path, + "http_timeout": config.get("http_timeout"), + "webserver_port": config.get("webserver_port"), + "socketio_port": config.get("socketio_port"), + "bench_name": bench_name, + "error_pages": get_error_pages(), + "ssl_certificate": site_config.get("ssl_certificate"), + "ssl_certificate_key": site_config.get("ssl_certificate_key"), + } + + bench_manager_nginx_conf = template.render(**template_vars) + + conf_path = os.path.join(bench_path, "config", "nginx.conf") + + if not yes and os.path.exists(conf_path): + click.confirm( + "nginx.conf already exists and bench-manager configuration will be appended to it. Do you want to continue?", + abort=True, + ) + + with open(conf_path, "a") as myfile: + myfile.write(bench_manager_nginx_conf) + + +def prepare_sites(config, bench_path): + sites = { + "that_use_port": [], + "that_use_dns": [], + "that_use_ssl": [], + "that_use_wildcard_ssl": [], + } + + domain_map = {} + ports_in_use = {} + + dns_multitenant = config.get("dns_multitenant") + + shared_port_exception_found = False + sites_configs = get_sites_with_config(bench_path=bench_path) + + # preload all preset site ports to avoid conflicts + + if not dns_multitenant: + for site in sites_configs: + if site.get("port"): + if not site["port"] in ports_in_use: + ports_in_use[site["port"]] = [] + ports_in_use[site["port"]].append(site["name"]) + + for site in sites_configs: + if dns_multitenant: + domain = site.get("domain") + + if domain: + # when site's folder name is different than domain name + domain_map[domain] = site["name"] + + site_name = domain or site["name"] + + if site.get("wildcard"): + 
sites["that_use_wildcard_ssl"].append(site_name) + + if not sites.get("wildcard_ssl_certificate"): + sites["wildcard_ssl_certificate"] = site["ssl_certificate"] + sites["wildcard_ssl_certificate_key"] = site["ssl_certificate_key"] + + elif site.get("ssl_certificate") and site.get("ssl_certificate_key"): + sites["that_use_ssl"].append(site) + + else: + sites["that_use_dns"].append(site_name) + + else: + if not site.get("port"): + site["port"] = 80 + if site["port"] in ports_in_use: + site["port"] = 8001 + while site["port"] in ports_in_use: + site["port"] += 1 + + if site["port"] in ports_in_use and not site["name"] in ports_in_use[site["port"]]: + shared_port_exception_found = True + ports_in_use[site["port"]].append(site["name"]) + else: + ports_in_use[site["port"]] = [] + ports_in_use[site["port"]].append(site["name"]) + + sites["that_use_port"].append(site) + + if not dns_multitenant and shared_port_exception_found: + message = "Port conflicts found:" + port_conflict_index = 0 + for port_number in ports_in_use: + if len(ports_in_use[port_number]) > 1: + port_conflict_index += 1 + message += f"\n{port_conflict_index} - Port {port_number} is shared among sites:" + for site_name in ports_in_use[port_number]: + message += f" {site_name}" + raise Exception(message) + + if not dns_multitenant: + message = "Port configuration list:" + for site in sites_configs: + message += f"\n\nSite {site['name']} assigned port: {site['port']}" + + print(message) + + sites["domain_map"] = domain_map + + return sites + + +def get_sites_with_config(bench_path): + from bench.bench import Bench + from bench.config.site_config import get_site_config + + bench = Bench(bench_path) + sites = bench.sites + conf = bench.conf + dns_multitenant = conf.get("dns_multitenant") + + ret = [] + for site in sites: + try: + site_config = get_site_config(site, bench_path=bench_path) + except Exception as e: + strict_nginx = conf.get("strict_nginx") + if strict_nginx: + print( + f"\n\nERROR: The site 
config for the site {site} is broken.", + "If you want this command to pass, instead of just throwing an error,", + "You may remove the 'strict_nginx' flag from common_site_config.json or set it to 0", + "\n\n", + ) + raise e + else: + print( + f"\n\nWARNING: The site config for the site {site} is broken.", + "If you want this command to fail, instead of just showing a warning,", + "You may add the 'strict_nginx' flag to common_site_config.json and set it to 1", + "\n\n", + ) + continue + + ret.append( + { + "name": site, + "port": site_config.get("nginx_port"), + "ssl_certificate": site_config.get("ssl_certificate"), + "ssl_certificate_key": site_config.get("ssl_certificate_key"), + } + ) + + if dns_multitenant and site_config.get("domains"): + for domain in site_config.get("domains"): + # domain can be a string or a dict with 'domain', 'ssl_certificate', 'ssl_certificate_key' + if isinstance(domain, str): + domain = {"domain": domain} + + domain["name"] = site + ret.append(domain) + + use_wildcard_certificate(bench_path, ret) + + return ret + + +def use_wildcard_certificate(bench_path, ret): + """ + stored in common_site_config.json as: + "wildcard": { + "domain": "*.xhiveerp.com", + "ssl_certificate": "/path/to/xhiveerp.com.cert", + "ssl_certificate_key": "/path/to/xhiveerp.com.key" + } + """ + from bench.bench import Bench + + config = Bench(bench_path).conf + wildcard = config.get("wildcard") + + if not wildcard: + return + + domain = wildcard["domain"] + ssl_certificate = wildcard["ssl_certificate"] + ssl_certificate_key = wildcard["ssl_certificate_key"] + + # If domain is set as "*" all domains will be included + if domain.startswith("*"): + domain = domain[1:] + else: + domain = "." 
+ domain + + for site in ret: + if site.get("ssl_certificate"): + continue + + if (site.get("domain") or site["name"]).endswith(domain): + # example: ends with .xhiveerp.com + site["ssl_certificate"] = ssl_certificate + site["ssl_certificate_key"] = ssl_certificate_key + site["wildcard"] = 1 + + +def get_error_pages(): + bench_app_path = os.path.abspath(bench.__path__[0]) + templates = os.path.join(bench_app_path, "config", "templates") + + return {502: os.path.join(templates, "502.html")} + + +def get_limit_conn_shared_memory(): + """Allocate 2 percent of total virtual memory as shared memory for nginx limit_conn_zone""" + total_vm = (os.sysconf("SC_PAGE_SIZE") * os.sysconf("SC_PHYS_PAGES")) / ( + 1024 * 1024 + ) # in MB + + return int(0.02 * total_vm) diff --git a/bench/config/procfile.py b/bench/config/procfile.py new file mode 100755 index 0000000..7feaab7 --- /dev/null +++ b/bench/config/procfile.py @@ -0,0 +1,38 @@ +import os +import platform + +import click + +import bench +from bench.app import use_rq +from bench.bench import Bench +from bench.utils import which + + +def setup_procfile(bench_path, yes=False, skip_redis=False): + config = Bench(bench_path).conf + procfile_path = os.path.join(bench_path, "Procfile") + + is_mac = platform.system() == "Darwin" + if not yes and os.path.exists(procfile_path): + click.confirm( + "A Procfile already exists and this will overwrite it. 
Do you want to continue?", + abort=True, + ) + + procfile = ( + bench.config.env() + .get_template("Procfile") + .render( + node=which("node") or which("nodejs"), + use_rq=use_rq(bench_path), + webserver_port=config.get("webserver_port"), + CI=os.environ.get("CI"), + skip_redis=skip_redis, + workers=config.get("workers", {}), + is_mac=is_mac, + ) + ) + + with open(procfile_path, "w") as f: + f.write(procfile) diff --git a/bench/config/production_setup.py b/bench/config/production_setup.py new file mode 100755 index 0000000..35f624c --- /dev/null +++ b/bench/config/production_setup.py @@ -0,0 +1,206 @@ +# imports - standard imports +import contextlib +import os +import logging +import sys + +# imports - module imports +import bench +from bench.config.nginx import make_nginx_conf +from bench.config.supervisor import ( + generate_supervisor_config, + check_supervisord_config, +) +from bench.config.systemd import generate_systemd_config +from bench.bench import Bench +from bench.utils import exec_cmd, which, get_bench_name, get_cmd_output, log +from bench.utils.system import fix_prod_setup_perms +from bench.exceptions import CommandFailedError + +logger = logging.getLogger(bench.PROJECT_NAME) + + +def setup_production_prerequisites(): + """Installs ansible, fail2banc, NGINX and supervisor""" + if not which("ansible"): + exec_cmd(f"sudo {sys.executable} -m pip install ansible") + if not which("fail2ban-client"): + exec_cmd("bench setup role fail2ban") + if not which("nginx"): + exec_cmd("bench setup role nginx") + if not which("supervisord"): + exec_cmd("bench setup role supervisor") + + +def setup_production(user, bench_path=".", yes=False): + print("Setting Up prerequisites...") + setup_production_prerequisites() + + conf = Bench(bench_path).conf + + if conf.get("restart_supervisor_on_update") and conf.get("restart_systemd_on_update"): + raise Exception( + "You cannot use supervisor and systemd at the same time. Modify your common_site_config accordingly." 
def service(service_name, service_option):
	"""Run `service_option` (start/stop/reload/...) on `service_name` with
	whichever service manager this host provides.

	Resolution order: systemctl (only when PID 1 is systemd), SysV `service`,
	then a user-supplied manager via the BENCH_SERVICE_MANAGER /
	BENCH_SERVICE_MANAGER_COMMAND environment variables. Logs an error when
	no service manager can be found.
	"""
	if os.path.basename(which("systemctl") or "") == "systemctl" and is_running_systemd():
		exec_cmd(f"sudo systemctl {service_option} {service_name}")

	elif os.path.basename(which("service") or "") == "service":
		exec_cmd(f"sudo service {service_name} {service_option}")

	else:
		# look for 'service_manager' and 'service_manager_command' in environment
		service_manager = os.environ.get("BENCH_SERVICE_MANAGER")
		if service_manager:
			service_manager_command = (
				os.environ.get("BENCH_SERVICE_MANAGER_COMMAND")
				# BUG FIX: the original interpolated `service` — this function
				# object — instead of the service's name
				or f"{service_manager} {service_option} {service_name}"
			)
			exec_cmd(service_manager_command)

		else:
			log(
				f"No service manager found: '{service_name} {service_option}' failed to execute",
				level=2,
			)
def is_running_systemd():
	"""Return True when PID 1 is systemd, False otherwise (e.g. SysV init)."""
	with open("/proc/1/comm") as proc_file:
		init_name = proc_file.read().strip()
	# the original init/systemd/default three-way branch reduces to a single
	# equality check: only "systemd" ever returned True
	return init_name == "systemd"
def get_site_config(site, bench_path="."):
	"""Load <bench_path>/sites/<site>/site_config.json as a dict.

	Returns an empty dict when the site has no site_config.json.
	"""
	config_path = os.path.join(bench_path, "sites", site, "site_config.json")
	if os.path.exists(config_path):
		with open(config_path) as config_file:
			return json.load(config_file)
	return {}
"ssl_certificate": ssl_certificate, + "ssl_certificate_key": ssl_certificate_key, + } + + domains.append(domain) + update_site_config(site, {"domains": domains}, bench_path=bench_path) + + +def remove_domain(site, domain, bench_path="."): + domains = get_domains(site, bench_path) + for i, d in enumerate(domains): + if (isinstance(d, dict) and d["domain"] == domain) or d == domain: + domains.remove(d) + break + + update_site_config(site, {"domains": domains}, bench_path=bench_path) + + +def sync_domains(site, domains, bench_path="."): + """Checks if there is a change in domains. If yes, updates the domains list.""" + changed = False + existing_domains = get_domains_dict(get_domains(site, bench_path)) + new_domains = get_domains_dict(domains) + + if set(existing_domains.keys()) != set(new_domains.keys()): + changed = True + + else: + for d in list(existing_domains.values()): + if d != new_domains.get(d["domain"]): + changed = True + break + + if changed: + # replace existing domains with this one + update_site_config(site, {"domains": domains}, bench_path=".") + + return changed + + +def get_domains(site, bench_path="."): + return get_site_config(site, bench_path=bench_path).get("domains") or [] + + +def get_domains_dict(domains): + domains_dict = defaultdict(dict) + for d in domains: + if isinstance(d, str): + domains_dict[d] = {"domain": d} + + elif isinstance(d, dict): + domains_dict[d["domain"]] = d + + return domains_dict diff --git a/bench/config/supervisor.py b/bench/config/supervisor.py new file mode 100644 index 0000000..9373607 --- /dev/null +++ b/bench/config/supervisor.py @@ -0,0 +1,167 @@ +# imports - standard imports +import getpass +import logging +import os + +# imports - third party imports +import click + +# imports - module imports +import bench +from bench.app import use_rq +from bench.bench import Bench +from bench.config.common_site_config import ( + compute_max_requests_jitter, + get_config, + get_default_max_requests, + get_gunicorn_workers, + 
update_config, +) +from bench.utils import get_bench_name, which + +logger = logging.getLogger(bench.PROJECT_NAME) + + +def generate_supervisor_config(bench_path, user=None, yes=False, skip_redis=False): + """Generate supervisor config for respective bench path""" + if not user: + user = getpass.getuser() + + config = Bench(bench_path).conf + template = bench.config.env().get_template("supervisor.conf") + bench_dir = os.path.abspath(bench_path) + + web_worker_count = config.get( + "gunicorn_workers", get_gunicorn_workers()["gunicorn_workers"] + ) + max_requests = config.get( + "gunicorn_max_requests", get_default_max_requests(web_worker_count) + ) + + config = template.render( + **{ + "bench_dir": bench_dir, + "sites_dir": os.path.join(bench_dir, "sites"), + "user": user, + "use_rq": use_rq(bench_path), + "http_timeout": config.get("http_timeout", 120), + "redis_server": which("redis-server"), + "node": which("node") or which("nodejs"), + "redis_cache_config": os.path.join(bench_dir, "config", "redis_cache.conf"), + "redis_queue_config": os.path.join(bench_dir, "config", "redis_queue.conf"), + "webserver_port": config.get("webserver_port", 8000), + "gunicorn_workers": web_worker_count, + "gunicorn_max_requests": max_requests, + "gunicorn_max_requests_jitter": compute_max_requests_jitter(max_requests), + "bench_name": get_bench_name(bench_path), + "background_workers": config.get("background_workers") or 1, + "bench_cmd": which("bench"), + "skip_redis": skip_redis, + "workers": config.get("workers", {}), + "multi_queue_consumption": can_enable_multi_queue_consumption(bench_path), + "supervisor_startretries": 10, + } + ) + + conf_path = os.path.join(bench_path, "config", "supervisor.conf") + if not yes and os.path.exists(conf_path): + click.confirm( + "supervisor.conf already exists and this will overwrite it. 
def check_supervisord_config(user=None):
	"""From bench v5.x, we're moving to supervisor running as user"""
	# i don't think bench should be responsible for this but we're way past this now...
	# removed updating supervisord conf & reload in Aug 2022 - gavin@xhiveframework.io
	import configparser

	if not user:
		user = getpass.getuser()

	supervisord_conf = get_supervisord_conf()
	section = "unix_http_server"
	updated_values = {"chmod": "0760", "chown": f"{user}:{user}"}
	supervisord_conf_changes = ""

	if not supervisord_conf:
		# BUG FIX: Logger.log() requires a numeric level as its first argument —
		# calling it with only a message raises TypeError at runtime
		logger.warning("supervisord.conf not found")
		return

	config = configparser.ConfigParser()
	config.read(supervisord_conf)

	if section not in config.sections():
		config.add_section(section)
		action = f"Section {section} Added"
		logger.warning(action)
		supervisord_conf_changes += "\n" + action

	for key, value in updated_values.items():
		try:
			current_value = config.get(section, key)
		except configparser.NoOptionError:
			current_value = ""

		if current_value.strip() != value:
			config.set(section, key, value)
			action = (
				f"Updated supervisord.conf: '{key}' changed from '{current_value}' to '{value}'"
			)
			logger.warning(action)
			supervisord_conf_changes += "\n" + action

	if not supervisord_conf_changes:
		# nothing was changed in-memory; tell the operator what to set manually
		logger.error("supervisord.conf not updated")
		contents = "\n".join(f"{x}={y}" for x, y in updated_values.items())
		print(
			f"Update your {supervisord_conf} with the following values:\n[{section}]\n{contents}"
		)
delete_symlinks=False, +): + + if not user: + user = getpass.getuser() + + config = Bench(bench_path).conf + + bench_dir = os.path.abspath(bench_path) + bench_name = get_bench_name(bench_path) + + if stop: + exec_cmd( + f"sudo systemctl stop -- $(systemctl show -p Requires {bench_name}.target | cut -d= -f2)" + ) + return + + if create_symlinks: + _create_symlinks(bench_path) + return + + if delete_symlinks: + _delete_symlinks(bench_path) + return + + number_of_workers = config.get("background_workers") or 1 + background_workers = [] + for i in range(number_of_workers): + background_workers.append( + get_bench_name(bench_path) + "-xhiveframework-default-worker@" + str(i + 1) + ".service" + ) + + for i in range(number_of_workers): + background_workers.append( + get_bench_name(bench_path) + "-xhiveframework-short-worker@" + str(i + 1) + ".service" + ) + + for i in range(number_of_workers): + background_workers.append( + get_bench_name(bench_path) + "-xhiveframework-long-worker@" + str(i + 1) + ".service" + ) + + web_worker_count = config.get( + "gunicorn_workers", get_gunicorn_workers()["gunicorn_workers"] + ) + max_requests = config.get( + "gunicorn_max_requests", get_default_max_requests(web_worker_count) + ) + + bench_info = { + "bench_dir": bench_dir, + "sites_dir": os.path.join(bench_dir, "sites"), + "user": user, + "use_rq": use_rq(bench_path), + "http_timeout": config.get("http_timeout", 120), + "redis_server": which("redis-server"), + "node": which("node") or which("nodejs"), + "redis_cache_config": os.path.join(bench_dir, "config", "redis_cache.conf"), + "redis_queue_config": os.path.join(bench_dir, "config", "redis_queue.conf"), + "webserver_port": config.get("webserver_port", 8000), + "gunicorn_workers": web_worker_count, + "gunicorn_max_requests": max_requests, + "gunicorn_max_requests_jitter": compute_max_requests_jitter(max_requests), + "bench_name": get_bench_name(bench_path), + "worker_target_wants": " ".join(background_workers), + "bench_cmd": 
def setup_systemd_directory(bench_path):
	"""Ensure <bench_path>/config/systemd exists.

	Uses exist_ok=True instead of the previous check-then-create pair,
	avoiding the race where the directory appears between the exists()
	check and makedirs() (which would then raise FileExistsError).
	"""
	os.makedirs(os.path.join(bench_path, "config", "systemd"), exist_ok=True)
def setup_web_config(bench_info, bench_path):
	"""Render and write the web-group systemd units (web target, web service,
	node socketio service) into <bench_path>/config/systemd.

	Deduplicates the previous copy-pasted render/open/write sequence into a
	single data-driven loop; templates, output paths, and write order are
	unchanged.
	"""
	bench_name = bench_info.get("bench_name")
	systemd_dir = os.path.join(bench_path, "config", "systemd")

	# (template name, generated unit filename)
	units = (
		("systemd/xhiveframework-bench-web.target", bench_name + "-web.target"),
		(
			"systemd/xhiveframework-bench-xhiveframework-web.service",
			bench_name + "-xhiveframework-web.service",
		),
		(
			"systemd/xhiveframework-bench-node-socketio.service",
			bench_name + "-node-socketio.service",
		),
	)

	for template_name, unit_filename in units:
		template = bench.config.env().get_template(template_name)
		with open(os.path.join(systemd_dir, unit_filename), "w") as f:
			f.write(template.render(**bench_info))
def _create_symlinks(bench_path):
	"""Symlink every generated systemd unit file from <bench>/config/systemd
	into /etc/systemd/system, then reload the systemd daemon."""
	bench_dir = os.path.abspath(bench_path)
	etc_systemd_system = os.path.join("/", "etc", "systemd", "system")
	config_path = os.path.join(bench_dir, "config", "systemd")
	for unit_file in get_unit_files(bench_dir):
		# BUG FIX: the link source previously dropped the computed unit
		# filename, producing a broken `ln -s` command (and leaving the
		# `filename` variable unused)
		filename = "".join(unit_file)
		exec_cmd(f"sudo ln -s {config_path}/{filename} {etc_systemd_system}/{filename}")
	exec_cmd("sudo systemctl daemon-reload")
b/bench/config/templates/502.html @@ -0,0 +1,89 @@ + + + + + Sorry! We will be back soon. + + + +
+
+ sad-face-avatar-boy-man-11Created with Sketch. +
+
+

+ Sorry!
+ We will be back soon. +

+

+ Don't panic. It's not you, it's us.
+ Most likely, our engineers are updating the code, + and it should take a minute for the new code to load into memory.

+ Try refreshing after a minute or two. +

+
+
+
+ + diff --git a/bench/config/templates/Procfile b/bench/config/templates/Procfile new file mode 100644 index 0000000..2a7015f --- /dev/null +++ b/bench/config/templates/Procfile @@ -0,0 +1,18 @@ +{% if not skip_redis %} +redis_cache: redis-server config/redis_cache.conf +redis_queue: redis-server config/redis_queue.conf +{% endif %} +web: bench serve {% if webserver_port -%} --port {{ webserver_port }} {%- endif %} + +socketio: {{ node }} apps/xhiveframework/socketio.js + +{% if not CI %} +watch: bench watch +{% endif %} + +schedule: bench schedule +worker: {{ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES NO_PROXY=*' if is_mac else '' }} bench worker 1>> logs/worker.log 2>> logs/worker.error.log +{% for worker_name, worker_details in workers.items() %} +worker_{{ worker_name }}: {{ 'OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES NO_PROXY=*' if is_mac else '' }} bench worker --queue {{ worker_name }} 1>> logs/worker.log 2>> logs/worker.error.log +{% endfor %} + diff --git a/bench/config/templates/bench_manager_nginx.conf b/bench/config/templates/bench_manager_nginx.conf new file mode 100644 index 0000000..abc0e6f --- /dev/null +++ b/bench/config/templates/bench_manager_nginx.conf @@ -0,0 +1,100 @@ +server { + listen {{ port }}; + server_name {{ domain }}; + root {{ sites_path }}; + + + {% if ssl_certificate and ssl_certificate_key %} + ssl on; + ssl_certificate {{ ssl_certificate }}; + ssl_certificate_key {{ ssl_certificate_key }}; + ssl_session_timeout 5m; + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; + ssl_ciphers "EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS"; + ssl_prefer_server_ciphers on; + {% endif %} + + location /assets { + try_files $uri =404; + } + + location ~ ^/protected/(.*) { + internal; + try_files /{{ bench_manager_site_name }}/$1 =404; + } + + location /socket.io { + proxy_http_version 1.1; + proxy_set_header Upgrade 
$http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header X-Xhiveframework-Site-Name {{ bench_manager_site_name }}; + proxy_set_header Origin $scheme://$http_host; + proxy_set_header Host {{ bench_manager_site_name }}; + + proxy_pass http://{{ bench_name }}-socketio-server; + } + + location / { + try_files /{{ bench_manager_site_name }}/public/$uri @webserver; + } + + location @webserver { + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Xhiveframework-Site-Name {{ bench_manager_site_name }}; + proxy_set_header Host {{ bench_manager_site_name }}; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout {{ http_timeout or 120 }}; + proxy_redirect off; + + proxy_pass http://{{ bench_name }}-xhiveframework; + } + + # error pages + {% for error_code, error_page in error_pages.items() -%} + + error_page {{ error_code }} /{{ error_page.split('/')[-1] }}; + location /{{ error_code }}.html { + root {{ '/'.join(error_page.split('/')[:-1]) }}; + internal; + } + + {% endfor -%} + + # optimizations + sendfile on; + keepalive_timeout 15; + client_max_body_size 50m; + client_body_buffer_size 16K; + client_header_buffer_size 1k; + + # enable gzip compresion + # based on https://mattstauffer.co/blog/enabling-gzip-on-nginx-servers-including-laravel-forge + gzip on; + gzip_http_version 1.1; + gzip_comp_level 5; + gzip_min_length 256; + gzip_proxied any; + gzip_vary on; + gzip_types + application/atom+xml + application/javascript + application/json + application/rss+xml + application/vnd.ms-fontobject + application/x-font-ttf + application/font-woff + application/x-web-app-manifest+json + application/xhtml+xml + application/xml + font/opentype + image/svg+xml + image/x-icon + text/css + text/plain + text/x-component + ; + # text/html is always compressed by HttpGzipModule +} + + diff --git a/bench/config/templates/frappe_sudoers b/bench/config/templates/frappe_sudoers new file mode 100644 
index 0000000..927eb54 --- /dev/null +++ b/bench/config/templates/frappe_sudoers @@ -0,0 +1,20 @@ +# This file is auto-generated by xhiveframework/bench +# To re-generate this file, run "bench setup sudoers" + +{% if service %} +{{ user }} ALL = (root) {{ service }} +{{ user }} ALL = (root) NOPASSWD: {{ service }} nginx * +{% endif %} + +{% if systemctl %} +{{ user }} ALL = (root) {{ systemctl }} +{{ user }} ALL = (root) NOPASSWD: {{ systemctl }} * nginx +{% endif %} + +{% if nginx %} +{{ user }} ALL = (root) NOPASSWD: {{ nginx }} +{% endif %} + +{{ user }} ALL = (root) NOPASSWD: {{ certbot }} +Defaults:{{ user }} !requiretty + diff --git a/bench/config/templates/letsencrypt.cfg b/bench/config/templates/letsencrypt.cfg new file mode 100755 index 0000000..cde2520 --- /dev/null +++ b/bench/config/templates/letsencrypt.cfg @@ -0,0 +1,19 @@ +# This is an example of the kind of things you can do in a configuration file. +# All flags used by the client can be configured here. Run Certbot with +# "--help" to learn more about the available options. + +# Use a 4096 bit RSA key instead of 2048 +rsa-key-size = 4096 + +# Uncomment and update to register with the specified e-mail address +#email = email@domain.com + +# Uncomment and update to generate certificates for the specified +# domains. 
+domains = {{ domain }} + +# Uncomment to use a text interface instead of ncurses +text = True + +# Uncomment to use the standalone authenticator on port 443 +authenticator = standalone diff --git a/bench/config/templates/nginx.conf b/bench/config/templates/nginx.conf new file mode 100644 index 0000000..fea6925 --- /dev/null +++ b/bench/config/templates/nginx.conf @@ -0,0 +1,237 @@ +{%- macro nginx_map(from_variable, to_variable, values, default) %} +map {{ from_variable }} {{ to_variable }} { + {% for (from, to) in values.items() -%} + {{ from }} {{ to }}; + {% endfor %} + + {%- if default -%} + default {{ default }}; + {% endif %} +} +{%- endmacro %} + +{%- macro server_block(bench_name, port, server_names, site_name, sites_path, ssl_certificate, ssl_certificate_key) %} +server { + {% if ssl_certificate and ssl_certificate_key %} + listen {{ port }} ssl; + listen [::]:{{ port }} ssl; + {% else %} + listen {{ port }}; + listen [::]:{{ port }}; + {% endif %} + + server_name + {% for name in server_names -%} + {{ name }} + {% endfor -%} + ; + + root {{ sites_path }}; + + {% if allow_rate_limiting %} + limit_conn per_host_{{ bench_name_hash }} 8; + {% endif %} + + proxy_buffer_size 128k; + proxy_buffers 4 256k; + proxy_busy_buffers_size 256k; + + {% if ssl_certificate and ssl_certificate_key %} + ssl_certificate {{ ssl_certificate }}; + ssl_certificate_key {{ ssl_certificate_key }}; + ssl_session_timeout 5m; + ssl_session_cache shared:SSL:10m; + ssl_session_tickets off; + ssl_stapling on; + ssl_stapling_verify on; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers EECDH+AESGCM:EDH+AESGCM; + ssl_ecdh_curve secp384r1; + ssl_prefer_server_ciphers on; + {% endif %} + + add_header X-Frame-Options "SAMEORIGIN"; + add_header Strict-Transport-Security "max-age=63072000; includeSubDomains; preload"; + add_header X-Content-Type-Options nosniff; + add_header X-XSS-Protection "1; mode=block"; + add_header Referrer-Policy "same-origin, strict-origin-when-cross-origin"; + + location 
/assets { + try_files $uri =404; + add_header Cache-Control "max-age=31536000"; + } + + location ~ ^/protected/(.*) { + internal; + try_files /{{ site_name }}/$1 =404; + } + + location /socket.io { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "upgrade"; + proxy_set_header X-Xhiveframework-Site-Name {{ site_name }}; + proxy_set_header Origin $scheme://$http_host; + proxy_set_header Host $host; + + proxy_pass http://{{ bench_name }}-socketio-server; + } + + location / { + + rewrite ^(.+)/$ $1 permanent; + rewrite ^(.+)/index\.html$ $1 permanent; + rewrite ^(.+)\.html$ $1 permanent; + + location ~* ^/files/.*.(htm|html|svg|xml) { + add_header Content-disposition "attachment"; + try_files /{{ site_name }}/public/$uri @webserver; + } + + try_files /{{ site_name }}/public/$uri @webserver; + } + + location @webserver { + proxy_http_version 1.1; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Xhiveframework-Site-Name {{ site_name }}; + proxy_set_header Host $host; + proxy_set_header X-Use-X-Accel-Redirect True; + proxy_read_timeout {{ http_timeout or 120 }}; + proxy_redirect off; + + proxy_pass http://{{ bench_name }}-xhiveframework; + } + + # error pages + {% for error_code, error_page in error_pages.items() -%} + + error_page {{ error_code }} /{{ error_page.split('/')[-1] }}; + location /{{ error_code }}.html { + root {{ '/'.join(error_page.split('/')[:-1]) }}; + internal; + } + + {% endfor -%} + + {% if logging %} + {%- if logging.level == "site" -%} + + access_log /var/log/nginx/{{ site_name }}_access.log {{ logging.log_format }}; + error_log /var/log/nginx/{{ site_name }}_error.log; + + {%- elif logging.level == "combined" -%} + + access_log /var/log/nginx/access.log {{ logging.log_format }}; + error_log /var/log/nginx/error.log; + + {%- endif %} + {%- endif %} + + # optimizations + sendfile on; + keepalive_timeout 15; + client_max_body_size 50m; + 
client_body_buffer_size 16K; + client_header_buffer_size 1k; + + # enable gzip compresion + # based on https://mattstauffer.co/blog/enabling-gzip-on-nginx-servers-including-laravel-forge + gzip on; + gzip_http_version 1.1; + gzip_comp_level 5; + gzip_min_length 256; + gzip_proxied any; + gzip_vary on; + gzip_types + application/atom+xml + application/javascript + application/json + application/rss+xml + application/vnd.ms-fontobject + application/x-font-ttf + application/font-woff + application/x-web-app-manifest+json + application/xhtml+xml + application/xml + font/opentype + image/svg+xml + image/x-icon + text/css + text/plain + text/x-component + ; + # text/html is always compressed by HttpGzipModule +} + +{% if ssl_certificate and ssl_certificate_key -%} + # http to https redirect + server { + listen 80; + server_name + {% for name in server_names -%} + {{ name }} + {% endfor -%} + ; + + return 301 https://$host$request_uri; + } + +{% endif %} + +{%- endmacro -%} + +upstream {{ bench_name }}-xhiveframework { + server 127.0.0.1:{{ webserver_port or 8000 }} fail_timeout=0; +} + +upstream {{ bench_name}}-socketio-server { + server 127.0.0.1:{{ socketio_port or 3000 }} fail_timeout=0; +} + +{% if allow_rate_limiting %} +limit_conn_zone $host zone=per_host_{{ bench_name_hash }}:{{ limit_conn_shared_memory }}m; +{% endif %} + +# setup maps +{%- set site_name_variable="$host" %} +{% if sites.domain_map -%} + {# we append these variables with a random string as there could be multiple benches #} + {%- set site_name_variable="$site_name_{0}".format(random_string) -%} + {{ nginx_map(from_variable="$host", to_variable=site_name_variable, values=sites.domain_map, default="$host") }} +{%- endif %} + +# server blocks +{% if sites.that_use_dns -%} + + {{ server_block(bench_name, port=80, server_names=sites.that_use_dns, site_name=site_name_variable, sites_path=sites_path) }} + +{%- endif %} + +{% if sites.that_use_wildcard_ssl -%} + + {{ server_block(bench_name, port=443, 
server_names=sites.that_use_wildcard_ssl, + site_name=site_name_variable, sites_path=sites_path, + ssl_certificate=sites.wildcard_ssl_certificate, + ssl_certificate_key=sites.wildcard_ssl_certificate_key) }} + +{%- endif %} + +{%- if sites.that_use_ssl -%} + {% for site in sites.that_use_ssl -%} + + {{ server_block(bench_name, port=443, server_names=[site.domain or site.name], + site_name=site_name_variable, sites_path=sites_path, + ssl_certificate=site.ssl_certificate, ssl_certificate_key=site.ssl_certificate_key) }} + + {% endfor %} +{%- endif %} + +{% if sites.that_use_port -%} + {%- for site in sites.that_use_port -%} + + {{ server_block(bench_name, port=site.port, server_names=[site.name], site_name=site.name, sites_path=sites_path) }} + + {%- endfor %} +{% endif %} diff --git a/bench/config/templates/nginx_default.conf b/bench/config/templates/nginx_default.conf new file mode 100644 index 0000000..3c714ea --- /dev/null +++ b/bench/config/templates/nginx_default.conf @@ -0,0 +1,46 @@ +# For more information on configuration, see: +# * Official English Documentation: http://nginx.org/en/docs/ +# * Official Russian Documentation: http://nginx.org/ru/docs/ + +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log; +#error_log /var/log/nginx/error.log notice; +#error_log /var/log/nginx/error.log info; + +pid /run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + #tcp_nopush on; + + #keepalive_timeout 0; + keepalive_timeout 65; + + server_names_hash_bucket_size 64; + + #gzip on; + + index index.html index.htm; + + # Load modular configuration files from the /etc/nginx/conf.d directory. 
+ # See http://nginx.org/en/docs/ngx_core_module.html#include + # for more information. + include /etc/nginx/conf.d/*.conf; +} diff --git a/bench/config/templates/redis_cache.conf b/bench/config/templates/redis_cache.conf new file mode 100644 index 0000000..bb89b90 --- /dev/null +++ b/bench/config/templates/redis_cache.conf @@ -0,0 +1,14 @@ +dbfilename redis_cache.rdb +dir {{ pid_path }} +pidfile {{ pid_path }}/redis_cache.pid +bind 127.0.0.1 +port {{ port }} +maxmemory {{ maxmemory }}mb +maxmemory-policy allkeys-lru +appendonly no +{% if redis_version and redis_version >= 2.2 %} +save "" +{% endif %} +{% if redis_version and redis_version >= 6.0 %} +aclfile {{ config_path }}/redis_cache.acl +{% endif %} diff --git a/bench/config/templates/redis_queue.conf b/bench/config/templates/redis_queue.conf new file mode 100644 index 0000000..f4667c9 --- /dev/null +++ b/bench/config/templates/redis_queue.conf @@ -0,0 +1,8 @@ +dbfilename redis_queue.rdb +dir {{ pid_path }} +pidfile {{ pid_path }}/redis_queue.pid +bind 127.0.0.1 +port {{ port }} +{% if redis_version and redis_version >= 6.0 %} +aclfile {{ config_path }}/redis_queue.acl +{% endif %} diff --git a/bench/config/templates/supervisor.conf b/bench/config/templates/supervisor.conf new file mode 100644 index 0000000..93d77d0 --- /dev/null +++ b/bench/config/templates/supervisor.conf @@ -0,0 +1,151 @@ +; Notes: +; priority=1 --> Lower priorities indicate programs that start first and shut down last +; killasgroup=true --> send kill signal to child processes too + +; graceful timeout should always be lower than stopwaitsecs to avoid orphan gunicorn workers. 
+[program:{{ bench_name }}-xhiveframework-web] +command={{ bench_dir }}/env/bin/gunicorn -b 127.0.0.1:{{ webserver_port }} -w {{ gunicorn_workers }} --max-requests {{ gunicorn_max_requests }} --max-requests-jitter {{ gunicorn_max_requests_jitter }} -t {{ http_timeout }} --graceful-timeout 30 xhiveframework.app:application --preload +priority=4 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/web.log +stderr_logfile={{ bench_dir }}/logs/web.error.log +stopwaitsecs=40 +killasgroup=true +user={{ user }} +directory={{ sites_dir }} +startretries={{ supervisor_startretries }} + +[program:{{ bench_name }}-xhiveframework-schedule] +command={{ bench_cmd }} schedule +priority=3 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/schedule.log +stderr_logfile={{ bench_dir }}/logs/schedule.error.log +user={{ user }} +directory={{ bench_dir }} +startretries={{ supervisor_startretries }} + +{% if not multi_queue_consumption %} +[program:{{ bench_name }}-xhiveframework-default-worker] +command={{ bench_cmd }} worker --queue default +priority=4 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/worker.log +stderr_logfile={{ bench_dir }}/logs/worker.error.log +user={{ user }} +stopwaitsecs=1560 +directory={{ bench_dir }} +killasgroup=true +numprocs={{ background_workers }} +process_name=%(program_name)s-%(process_num)d +startretries={{ supervisor_startretries }} +{% endif %} + +[program:{{ bench_name }}-xhiveframework-short-worker] +command={{ bench_cmd }} worker --queue short{{',default' if multi_queue_consumption else ''}} +priority=4 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/worker.log +stderr_logfile={{ bench_dir }}/logs/worker.error.log +user={{ user }} +stopwaitsecs=360 +directory={{ bench_dir }} +killasgroup=true +numprocs={{ background_workers }} +process_name=%(program_name)s-%(process_num)d +startretries={{ supervisor_startretries }} + +[program:{{ bench_name 
}}-xhiveframework-long-worker] +command={{ bench_cmd }} worker --queue long{{',default,short' if multi_queue_consumption else ''}} +priority=4 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/worker.log +stderr_logfile={{ bench_dir }}/logs/worker.error.log +user={{ user }} +stopwaitsecs=1560 +directory={{ bench_dir }} +killasgroup=true +numprocs={{ background_workers }} +process_name=%(program_name)s-%(process_num)d +startretries={{ supervisor_startretries }} + +{% for worker_name, worker_details in workers.items() %} +[program:{{ bench_name }}-xhiveframework-{{ worker_name }}-worker] +command={{ bench_cmd }} worker --queue {{ worker_name }} +priority=4 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/worker.log +stderr_logfile={{ bench_dir }}/logs/worker.error.log +user={{ user }} +stopwaitsecs={{ worker_details["timeout"] }} +directory={{ bench_dir }} +killasgroup=true +numprocs={{ worker_details["background_workers"] or background_workers }} +process_name=%(program_name)s-%(process_num)d +startretries={{ supervisor_startretries }} +{% endfor %} + + +{% if not skip_redis %} +[program:{{ bench_name }}-redis-cache] +command={{ redis_server }} {{ redis_cache_config }} +priority=1 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/redis-cache.log +stderr_logfile={{ bench_dir }}/logs/redis-cache.error.log +user={{ user }} +directory={{ sites_dir }} +startretries={{ supervisor_startretries }} + +[program:{{ bench_name }}-redis-queue] +command={{ redis_server }} {{ redis_queue_config }} +priority=1 +autostart=true +autorestart=true +stdout_logfile={{ bench_dir }}/logs/redis-queue.log +stderr_logfile={{ bench_dir }}/logs/redis-queue.error.log +user={{ user }} +directory={{ sites_dir }} +startretries={{ supervisor_startretries }} +{% endif %} + +{% if node %} +[program:{{ bench_name }}-node-socketio] +command={{ node }} {{ bench_dir }}/apps/xhiveframework/socketio.js +priority=4 +autostart=true 
+autorestart=true +stdout_logfile={{ bench_dir }}/logs/node-socketio.log +stderr_logfile={{ bench_dir }}/logs/node-socketio.error.log +user={{ user }} +directory={{ bench_dir }} +startretries={{ supervisor_startretries }} +{% endif %} + +[group:{{ bench_name }}-web] +programs={{ bench_name }}-xhiveframework-web {%- if node -%} ,{{ bench_name }}-node-socketio {%- endif%} + + +{% if multi_queue_consumption %} + +[group:{{ bench_name }}-workers] +programs={{ bench_name }}-xhiveframework-schedule,{{ bench_name }}-xhiveframework-short-worker,{{ bench_name }}-xhiveframework-long-worker{%- for worker_name in workers -%},{{ bench_name }}-xhiveframework-{{ worker_name }}-worker{%- endfor %} + +{% else %} + +[group:{{ bench_name }}-workers] +programs={{ bench_name }}-xhiveframework-schedule,{{ bench_name }}-xhiveframework-default-worker,{{ bench_name }}-xhiveframework-short-worker,{{ bench_name }}-xhiveframework-long-worker{%- for worker_name in workers -%},{{ bench_name }}-xhiveframework-{{ worker_name }}-worker{%- endfor %} + +{% endif %} + +{% if not skip_redis %} +[group:{{ bench_name }}-redis] +programs={{ bench_name }}-redis-cache,{{ bench_name }}-redis-queue +{% endif %} diff --git a/bench/config/templates/systemd/frappe-bench-frappe-default-worker.service b/bench/config/templates/systemd/frappe-bench-frappe-default-worker.service new file mode 100644 index 0000000..9b2550d --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-frappe-default-worker.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-xhiveframework-default-worker %I" +PartOf={{ bench_name }}-workers.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ bench_cmd }} worker --queue default +StandardOutput=file:{{ bench_dir }}/logs/worker.log +StandardError=file:{{ bench_dir }}/logs/worker.error.log +WorkingDirectory={{ bench_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-frappe-long-worker.service 
b/bench/config/templates/systemd/frappe-bench-frappe-long-worker.service new file mode 100644 index 0000000..2dbb15f --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-frappe-long-worker.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-xhiveframework-short-worker %I" +PartOf={{ bench_name }}-workers.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ bench_cmd }} worker --queue long +StandardOutput=file:{{ bench_dir }}/logs/worker.log +StandardError=file:{{ bench_dir }}/logs/worker.error.log +WorkingDirectory={{ bench_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-frappe-schedule.service b/bench/config/templates/systemd/frappe-bench-frappe-schedule.service new file mode 100644 index 0000000..f956c34 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-frappe-schedule.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-xhiveframework-schedule" +PartOf={{ bench_name }}-workers.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ bench_cmd }} schedule +StandardOutput=file:{{ bench_dir }}/logs/schedule.log +StandardError=file:{{ bench_dir }}/logs/schedule.error.log +WorkingDirectory={{ bench_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-frappe-short-worker.service b/bench/config/templates/systemd/frappe-bench-frappe-short-worker.service new file mode 100644 index 0000000..d2e1bfa --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-frappe-short-worker.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-xhiveframework-short-worker %I" +PartOf={{ bench_name }}-workers.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ bench_cmd }} worker --queue short +StandardOutput=file:{{ bench_dir }}/logs/worker.log +StandardError=file:{{ bench_dir }}/logs/worker.error.log +WorkingDirectory={{ bench_dir }} diff --git 
a/bench/config/templates/systemd/frappe-bench-frappe-web.service b/bench/config/templates/systemd/frappe-bench-frappe-web.service new file mode 100644 index 0000000..742790e --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-frappe-web.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-xhiveframework-web" +PartOf={{ bench_name }}-web.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ bench_dir }}/env/bin/gunicorn -b 127.0.0.1:{{ webserver_port }} -w {{ gunicorn_workers }} -t {{ http_timeout }} --max-requests {{ gunicorn_max_requests }} --max-requests-jitter {{ gunicorn_max_requests_jitter }} xhiveframework.app:application --preload +StandardOutput=file:{{ bench_dir }}/logs/web.log +StandardError=file:{{ bench_dir }}/logs/web.error.log +WorkingDirectory={{ sites_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-node-socketio.service b/bench/config/templates/systemd/frappe-bench-node-socketio.service new file mode 100644 index 0000000..cf8dcd3 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-node-socketio.service @@ -0,0 +1,13 @@ +[Unit] +After={{ bench_name }}-xhiveframework-web.service +Description="{{ bench_name }}-node-socketio" +PartOf={{ bench_name }}-web.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ node }} {{ bench_dir }}/apps/xhiveframework/socketio.js +StandardOutput=file:{{ bench_dir }}/logs/node-socketio.log +StandardError=file:{{ bench_dir }}/logs/node-socketio.error.log +WorkingDirectory={{ bench_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-redis-cache.service b/bench/config/templates/systemd/frappe-bench-redis-cache.service new file mode 100644 index 0000000..025f749 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-redis-cache.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-redis-cache" +PartOf={{ bench_name }}-redis.target + +[Service] +User={{ user }} +Group={{ user }} 
+Restart=always +ExecStart={{ redis_server }} {{ redis_cache_config }} +StandardOutput=file:{{ bench_dir }}/logs/redis-cache.log +StandardError=file:{{ bench_dir }}/logs/redis-cache.error.log +WorkingDirectory={{ sites_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-redis-queue.service b/bench/config/templates/systemd/frappe-bench-redis-queue.service new file mode 100644 index 0000000..1d773da --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-redis-queue.service @@ -0,0 +1,12 @@ +[Unit] +Description="{{ bench_name }}-redis-queue" +PartOf={{ bench_name }}-redis.target + +[Service] +User={{ user }} +Group={{ user }} +Restart=always +ExecStart={{ redis_server }} {{ redis_queue_config }} +StandardOutput=file:{{ bench_dir }}/logs/redis-queue.log +StandardError=file:{{ bench_dir }}/logs/redis-queue.error.log +WorkingDirectory={{ sites_dir }} diff --git a/bench/config/templates/systemd/frappe-bench-redis.target b/bench/config/templates/systemd/frappe-bench-redis.target new file mode 100644 index 0000000..8879942 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-redis.target @@ -0,0 +1,6 @@ +[Unit] +After=network.target +Wants={{ bench_name }}-redis-cache.service {{ bench_name }}-redis-queue.service + +[Install] +WantedBy=multi-user.target diff --git a/bench/config/templates/systemd/frappe-bench-web.target b/bench/config/templates/systemd/frappe-bench-web.target new file mode 100644 index 0000000..fb090a4 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-web.target @@ -0,0 +1,6 @@ +[Unit] +After=network.target +Wants={{ bench_name }}-xhiveframework-web.service {{ bench_name }}-node-socketio.service + +[Install] +WantedBy=multi-user.target diff --git a/bench/config/templates/systemd/frappe-bench-workers.target b/bench/config/templates/systemd/frappe-bench-workers.target new file mode 100644 index 0000000..270bf20 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench-workers.target @@ -0,0 +1,6 @@ +[Unit] 
+After=network.target +Wants={{ worker_target_wants }} + +[Install] +WantedBy=multi-user.target diff --git a/bench/config/templates/systemd/frappe-bench.target b/bench/config/templates/systemd/frappe-bench.target new file mode 100644 index 0000000..7d76823 --- /dev/null +++ b/bench/config/templates/systemd/frappe-bench.target @@ -0,0 +1,6 @@ +[Unit] +After=network.target +Requires={{ bench_name }}-web.target {{ bench_name }}-workers.target {{ bench_name }}-redis.target + +[Install] +WantedBy=multi-user.target diff --git a/bench/exceptions.py b/bench/exceptions.py new file mode 100644 index 0000000..0465167 --- /dev/null +++ b/bench/exceptions.py @@ -0,0 +1,42 @@ +class InvalidBranchException(Exception): + pass + + +class InvalidRemoteException(Exception): + pass + + +class PatchError(Exception): + pass + + +class CommandFailedError(Exception): + pass + + +class BenchNotFoundError(Exception): + pass + + +class ValidationError(Exception): + pass + + +class AppNotInstalledError(ValidationError): + pass + + +class CannotUpdateReleaseBench(ValidationError): + pass + + +class FeatureDoesNotExistError(CommandFailedError): + pass + + +class NotInBenchDirectoryError(Exception): + pass + + +class VersionNotFound(Exception): + pass diff --git a/bench/patches/__init__.py b/bench/patches/__init__.py new file mode 100644 index 0000000..625f94d --- /dev/null +++ b/bench/patches/__init__.py @@ -0,0 +1,38 @@ +import os +import importlib + + +def run(bench_path): + source_patch_file = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "patches.txt" + ) + target_patch_file = os.path.join(os.path.abspath(bench_path), "patches.txt") + + with open(source_patch_file) as f: + patches = [ + p.strip() + for p in f.read().splitlines() + if p.strip() and not p.strip().startswith("#") + ] + + executed_patches = [] + if os.path.exists(target_patch_file): + with open(target_patch_file) as f: + executed_patches = f.read().splitlines() + + try: + for patch in patches: + if patch not in 
executed_patches: + module = importlib.import_module(patch.split()[0]) + execute = getattr(module, "execute") + result = execute(bench_path) + + if not result: + executed_patches.append(patch) + + finally: + with open(target_patch_file, "w") as f: + f.write("\n".join(executed_patches)) + + # end with an empty line + f.write("\n") diff --git a/bench/patches/patches.txt b/bench/patches/patches.txt new file mode 100644 index 0000000..4be1468 --- /dev/null +++ b/bench/patches/patches.txt @@ -0,0 +1,10 @@ +bench.patches.v3.deprecate_old_config +bench.patches.v3.celery_to_rq +bench.patches.v3.redis_bind_ip +bench.patches.v4.update_node +bench.patches.v4.update_socketio +bench.patches.v4.install_yarn #2 +bench.patches.v5.fix_user_permissions +bench.patches.v5.fix_backup_cronjob +bench.patches.v5.set_live_reload_config +bench.patches.v5.update_archived_sites \ No newline at end of file diff --git a/bench/patches/v5/__init__.py b/bench/patches/v5/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bench/patches/v5/fix_backup_cronjob.py b/bench/patches/v5/fix_backup_cronjob.py new file mode 100644 index 0000000..9b1677b --- /dev/null +++ b/bench/patches/v5/fix_backup_cronjob.py @@ -0,0 +1,15 @@ +from bench.config.common_site_config import get_config +from crontab import CronTab + + +def execute(bench_path): + """ + This patch fixes a cron job that would backup sites every minute per 6 hours + """ + + user = get_config(bench_path=bench_path).get("xhiveframework_user") + user_crontab = CronTab(user=user) + + for job in user_crontab.find_comment("bench auto backups set for every 6 hours"): + job.every(6).hours() + user_crontab.write() diff --git a/bench/patches/v5/fix_user_permissions.py b/bench/patches/v5/fix_user_permissions.py new file mode 100644 index 0000000..2b60afe --- /dev/null +++ b/bench/patches/v5/fix_user_permissions.py @@ -0,0 +1,61 @@ +# imports - standard imports +import getpass +import os +import subprocess + +# imports - module imports +from 
bench.cli import change_uid_msg +from bench.config.production_setup import get_supervisor_confdir, is_centos7, service +from bench.config.common_site_config import get_config +from bench.utils import exec_cmd, get_bench_name, get_cmd_output + + +def is_sudoers_set(): + """Check if bench sudoers is set""" + cmd = ["sudo", "-n", "bench"] + bench_warn = False + + with open(os.devnull, "wb") as f: + return_code_check = not subprocess.call(cmd, stdout=f) + + if return_code_check: + try: + bench_warn = change_uid_msg in get_cmd_output(cmd, _raise=False) + except subprocess.CalledProcessError: + bench_warn = False + finally: + return_code_check = return_code_check and bench_warn + + return return_code_check + + +def is_production_set(bench_path): + """Check if production is set for current bench""" + production_setup = False + bench_name = get_bench_name(bench_path) + + supervisor_conf_extn = "ini" if is_centos7() else "conf" + supervisor_conf_file_name = f"{bench_name}.{supervisor_conf_extn}" + supervisor_conf = os.path.join(get_supervisor_confdir(), supervisor_conf_file_name) + + if os.path.exists(supervisor_conf): + production_setup = production_setup or True + + nginx_conf = f"/etc/nginx/conf.d/{bench_name}.conf" + + if os.path.exists(nginx_conf): + production_setup = production_setup or True + + return production_setup + + +def execute(bench_path): + """This patch checks if bench sudoers is set and regenerate supervisor and sudoers files""" + user = get_config(".").get("xhiveframework_user") or getpass.getuser() + + if is_sudoers_set(): + if is_production_set(bench_path): + exec_cmd(f"sudo bench setup supervisor --yes --user {user}") + service("supervisord", "restart") + + exec_cmd(f"sudo bench setup sudoers {user}") diff --git a/bench/patches/v5/set_live_reload_config.py b/bench/patches/v5/set_live_reload_config.py new file mode 100644 index 0000000..97c5e63 --- /dev/null +++ b/bench/patches/v5/set_live_reload_config.py @@ -0,0 +1,5 @@ +from 
bench.config.common_site_config import update_config + + +def execute(bench_path): + update_config({"live_reload": True}, bench_path) diff --git a/bench/patches/v5/update_archived_sites.py b/bench/patches/v5/update_archived_sites.py new file mode 100644 index 0000000..26a17ea --- /dev/null +++ b/bench/patches/v5/update_archived_sites.py @@ -0,0 +1,52 @@ +""" +Deprecate archived_sites folder for consistency. This change is +only for Xhiveframework v14 benches. If not a v14 bench yet, skip this +patch and try again later. + +1. Rename folder `./archived_sites` to `./archived/sites` +2. Create a symlink `./archived_sites` => `./archived/sites` + +Corresponding changes in xhiveframework/xhiveframework via https://lab.membtech.com/xhiveframework/xhiveframework15/pull/15060 +""" +import os +from pathlib import Path + +import click +from bench.utils.app import get_current_version +from semantic_version import Version + + +def execute(bench_path): + xhiveframework_version = Version(get_current_version("xhiveframework")) + + if xhiveframework_version.major < 14 or os.name != "posix": + # Returning False means patch has been skipped + return False + + pre_patch_dir = os.getcwd() + old_directory = Path(bench_path, "archived_sites") + new_directory = Path(bench_path, "archived", "sites") + + if not old_directory.exists(): + return False + + if old_directory.is_symlink(): + return True + + os.chdir(bench_path) + + if not os.path.exists(new_directory): + os.makedirs(new_directory) + + old_directory.rename(new_directory) + + click.secho(f"Archived sites are now stored under {new_directory}") + + if not os.listdir(old_directory): + os.rmdir(old_directory) + + os.symlink(new_directory, old_directory) + + click.secho(f"Symlink {old_directory} that points to {new_directory}") + + os.chdir(pre_patch_dir) diff --git a/bench/playbooks/README.md b/bench/playbooks/README.md new file mode 100644 index 0000000..ae01d31 --- /dev/null +++ b/bench/playbooks/README.md @@ -0,0 +1,10 @@ +# 
Deploying a, developer/production-ready XhiveERP website with Ansible + +## Supported Platforms + - Debian 8, 9 + - Ubuntu 14.04, 16.04 + - CentOS 7 + +## Notes for maintainers + - For MariaDB playbooks refer https://github.com/PCextreme/ansible-role-mariadb + - Any changes made in relation to a role should be dont inside the role and not outside it diff --git a/bench/playbooks/create_user.yml b/bench/playbooks/create_user.yml new file mode 100644 index 0000000..03b0fdb --- /dev/null +++ b/bench/playbooks/create_user.yml @@ -0,0 +1,29 @@ +--- + + - hosts: localhost + become: yes + become_user: root + tasks: + - name: Create user + user: + name: '{{ xhiveframework_user }}' + generate_ssh_key: yes + + - name: Set home folder perms + file: + path: '{{ user_directory }}' + mode: 'o+rx' + owner: '{{ xhiveframework_user }}' + group: '{{ xhiveframework_user }}' + recurse: yes + + - name: Set /tmp/.bench folder perms + file: + path: '{{ repo_path }}' + owner: '{{ xhiveframework_user }}' + group: '{{ xhiveframework_user }}' + recurse: yes + + - name: Change default shell to bash + shell: "chsh {{ xhiveframework_user }} -s $(which bash)" +... 
diff --git a/bench/playbooks/macosx.yml b/bench/playbooks/macosx.yml new file mode 100644 index 0000000..6514ffd --- /dev/null +++ b/bench/playbooks/macosx.yml @@ -0,0 +1,41 @@ +--- +- hosts: localhost + become: yes + become_user: root + + vars: + bench_repo_path: "/Users/{{ ansible_user_id }}/.bench" + bench_path: "/Users/{{ ansible_user_id }}/xhiveframework-bench" + + tasks: + - name: install prequisites + homebrew: + name: + - cmake + - redis + - mariadb + - nodejs + state: present + + - name: install wkhtmltopdf + homebrew_cask: + name: + - wkhtmltopdf + state: present + + - name: configure mariadb + include: roles/mariadb/tasks/main.yml + vars: + mysql_conf_tpl: roles/mariadb/files/mariadb_config.cnf + + - name: Install MySQLdb in global env + pip: name=mysql-python version=1.2.5 + + # setup xhiveframework-bench + - include: includes/setup_bench.yml + + # setup development environment + - include: includes/setup_dev_env.yml + when: not production + +... diff --git a/bench/playbooks/roles/bash_screen_wall/files/screen_wall.sh b/bench/playbooks/roles/bash_screen_wall/files/screen_wall.sh new file mode 100644 index 0000000..dec411e --- /dev/null +++ b/bench/playbooks/roles/bash_screen_wall/files/screen_wall.sh @@ -0,0 +1,8 @@ +if [ $TERM != 'screen' ] +then + PS1='HEY! USE SCREEN '$PS1 +fi + +sw() { + screen -x $1 || screen -S $1 +} diff --git a/bench/playbooks/roles/bash_screen_wall/tasks/main.yml b/bench/playbooks/roles/bash_screen_wall/tasks/main.yml new file mode 100644 index 0000000..452b7a4 --- /dev/null +++ b/bench/playbooks/roles/bash_screen_wall/tasks/main.yml @@ -0,0 +1,4 @@ +--- +- name: Setup bash screen wall + copy: src=screen_wall.sh dest=/etc/profile.d/screen_wall.sh +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/bench/tasks/change_ssh_port.yml b/bench/playbooks/roles/bench/tasks/change_ssh_port.yml new file mode 100644 index 0000000..5e850ed --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/change_ssh_port.yml @@ -0,0 +1,20 @@ +--- +- name: Change ssh port + gather_facts: false + hosts: localhost + user: root + tasks: + - name: change sshd config + lineinfile: > + dest=/etc/ssh/sshd_config + regexp="^Port" + line="Port {{ ssh_port }}" + state=present + + - name: restart ssh + service: name=sshd state=reloaded + + - name: Change ansible ssh port to 2332 + set_fact: + ansible_ssh_port: '{{ ssh_port }}' +... \ No newline at end of file diff --git a/bench/playbooks/roles/bench/tasks/main.yml b/bench/playbooks/roles/bench/tasks/main.yml new file mode 100644 index 0000000..bdbd87c --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/main.yml @@ -0,0 +1,82 @@ +--- + - name: Check if /tmp/.bench exists + stat: + path: /tmp/.bench + register: tmp_bench + + - name: Check if bench_repo_path exists + stat: + path: '{{ bench_repo_path }}' + register: bench_repo_register + + - name: move /tmp/.bench if it exists + command: 'cp -R /tmp/.bench {{ bench_repo_path }}' + when: tmp_bench.stat.exists and not bench_repo_register.stat.exists + + - name: install bench + pip: + name: '{{ bench_repo_path }}' + extra_args: '-e' + become: yes + become_user: root + + - name: Overwrite bench if required + file: + state: absent + path: "{{ bench_path }}" + when: overwrite + + - name: Check whether bench exists + stat: + path: "{{ bench_path }}" + register: bench_stat + + - name: Fix permissions + become_user: root + command: chown {{ xhiveframework_user }} -R {{ user_directory }} + + - name: python3 bench init for develop + command: bench init {{ bench_path }} --xhiveframework-path {{ xhiveframework_repo_url }} --xhiveframework-branch {{ xhiveframework_branch }} --python {{ python }} + args: + creates: "{{ bench_path }}" + when: not 
bench_stat.stat.exists and not production + + - name: python3 bench init for production + command: bench init {{ bench_path }} --xhiveframework-path {{ xhiveframework_repo_url }} --xhiveframework-branch {{ xhiveframework_branch }} --python {{ python }} + args: + creates: "{{ bench_path }}" + when: not bench_stat.stat.exists and production + + # setup common_site_config + - name: setup config + command: bench setup config + args: + creates: "{{ bench_path }}/sites/common_site_config.json" + chdir: "{{ bench_path }}" + + - include_tasks: setup_inputrc.yml + + # Setup Procfile + - name: Setup Procfile + command: bench setup procfile + args: + creates: "{{ bench_path }}/Procfile" + chdir: "{{ bench_path }}" + + # Setup Redis env for RQ + - name: Setup Redis + command: bench setup redis + args: + creates: "{{ bench_path }}/config/redis_socketio.conf" + chdir: "{{ bench_path }}" + + # Setup an XhiveERP site + - include_tasks: setup_xhiveerp.yml + when: not run_travis + + # Setup Bench for production environment + - include_tasks: setup_bench_production.yml + vars: + bench_path: "{{ user_directory }}/{{ bench_name }}" + when: not run_travis and production +... 
diff --git a/bench/playbooks/roles/bench/tasks/setup_bench_production.yml b/bench/playbooks/roles/bench/tasks/setup_bench_production.yml new file mode 100644 index 0000000..aeedc65 --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/setup_bench_production.yml @@ -0,0 +1,28 @@ +--- +- name: Setup production + become: yes + become_user: root + command: bench setup production {{ xhiveframework_user }} --yes + args: + chdir: '{{ bench_path }}' + +- name: Setup Sudoers + become: yes + become_user: root + command: bench setup sudoers {{ xhiveframework_user }} + args: + chdir: '{{ bench_path }}' + +- name: Set correct permissions on bench.log + file: + path: '{{ bench_path }}/logs/bench.log' + owner: '{{ xhiveframework_user }}' + group: '{{ xhiveframework_user }}' + become: yes + become_user: root + +- name: Restart the bench + command: bench restart + args: + chdir: '{{ bench_path }}' +... diff --git a/bench/playbooks/roles/bench/tasks/setup_erpnext.yml b/bench/playbooks/roles/bench/tasks/setup_erpnext.yml new file mode 100644 index 0000000..bed1f43 --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/setup_erpnext.yml @@ -0,0 +1,29 @@ +--- + - name: Check if XhiveERP App exists + stat: path="{{ bench_path }}/apps/xhiveerp" + register: app + + - name: Get the XhiveERP app + command: bench get-app xhiveerp {{ xhiveerp_repo_url }} --branch {{ xhiveerp_branch }} + args: + creates: "{{ bench_path }}/apps/xhiveerp" + chdir: "{{ bench_path }}" + when: not app.stat.exists and not without_xhiveerp + + - name: Check whether the site already exists + stat: path="{{ bench_path }}/sites/{{ site }}" + register: site_folder + when: not without_site + + - name: Create a new site + command: "bench new-site {{ site }} --admin-password '{{ admin_password }}' --mariadb-root-password '{{ mysql_root_password }}'" + args: + chdir: "{{ bench_path }}" + when: not without_site and not site_folder.stat.exists + + - name: Install XhiveERP to default site + command: "bench --site {{ site }} 
install-app xhiveerp" + args: + chdir: "{{ bench_path }}" + when: not without_site and not without_xhiveerp +... diff --git a/bench/playbooks/roles/bench/tasks/setup_firewall.yml b/bench/playbooks/roles/bench/tasks/setup_firewall.yml new file mode 100644 index 0000000..f3186b4 --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/setup_firewall.yml @@ -0,0 +1,53 @@ +--- +- name: Setup Firewall + user: root + hosts: localhost + + tasks: + # For CentOS + - name: Enable SELinux + selinux: policy=targeted state=permissive + when: ansible_distribution == 'CentOS' + + - name: Install firewalld + yum: name=firewalld state=present + when: ansible_distribution == 'CentOS' + + - name: Enable Firewall + service: name=firewalld state=started enabled=yes + when: ansible_distribution == 'CentOS' + + - name: Add firewall rules + firewalld: port={{ item }}/tcp permanent=true state=enabled + with_items: + - 80 + - 443 + - "{{ ssh_port }}" + when: ansible_distribution == 'CentOS' + + - name: Restart Firewall + service: name=firewalld state=restarted enabled=yes + when: ansible_distribution == 'CentOS' + + # For Ubuntu / Debian + - name: Install ufw + apt: + state: present + force: yes + pkg: + - python-selinux + - ufw + when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' + + - name: Enable Firewall + ufw: state=enabled policy=deny + when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' + + - name: Add firewall rules + ufw: rule=allow proto=tcp port={{ item }} + with_items: + - 80 + - 443 + - "{{ ssh_port }}" + when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/bench/tasks/setup_inputrc.yml b/bench/playbooks/roles/bench/tasks/setup_inputrc.yml new file mode 100644 index 0000000..14b47a3 --- /dev/null +++ b/bench/playbooks/roles/bench/tasks/setup_inputrc.yml @@ -0,0 +1,11 @@ +--- +- name: insert/update inputrc for history + blockinfile: + dest: "{{ user_directory }}/.inputrc" + create: yes + block: | + ## arrow up + "\e[A":history-search-backward + ## arrow down + "\e[B":history-search-forward +... diff --git a/bench/playbooks/roles/common/tasks/debian.yml b/bench/playbooks/roles/common/tasks/debian.yml new file mode 100644 index 0000000..c3f340d --- /dev/null +++ b/bench/playbooks/roles/common/tasks/debian.yml @@ -0,0 +1,54 @@ +--- + +- name: Setup OpenSSL dependancy + pip: name=pyOpenSSL version=16.2.0 + +- name: install pillow prerequisites for Debian < 8 + apt: + pkg: + - libjpeg8-dev + - libtiff4-dev + - tcl8.5-dev + - tk8.5-dev + state: present + when: ansible_distribution_version is version_compare('8', 'lt') + +- name: install pillow prerequisites for Debian 8 + apt: + pkg: + - libjpeg62-turbo-dev + - libtiff5-dev + - tcl8.5-dev + - tk8.5-dev + state: present + when: ansible_distribution_version is version_compare('8', 'eq') + +- name: install pillow prerequisites for Debian 9 + apt: + pkg: + - libjpeg62-turbo-dev + - libtiff5-dev + - tcl8.5-dev + - tk8.5-dev + state: present + when: ansible_distribution_version is version_compare('9', 'eq') + + +- name: install pillow prerequisites for Debian >= 10 + apt: + pkg: + - libjpeg62-turbo-dev + - libtiff5-dev + - tcl8.6-dev + - tk8.6-dev + state: present + when: ansible_distribution_version is version_compare('10', 'ge') + +- name: install pdf prerequisites debian + apt: + pkg: + - libssl-dev + state: present + force: yes + +... 
diff --git a/bench/playbooks/roles/common/tasks/debian_family.yml b/bench/playbooks/roles/common/tasks/debian_family.yml new file mode 100644 index 0000000..64eb848 --- /dev/null +++ b/bench/playbooks/roles/common/tasks/debian_family.yml @@ -0,0 +1,44 @@ +--- + +- name: Install prerequisites using apt-get + become: yes + become_user: root + apt: + pkg: + - dnsmasq + - fontconfig + - git # Version control + - htop # Server stats + - libcrypto++-dev + - libfreetype6-dev + - liblcms2-dev + - libwebp-dev + - libxext6 + - libxrender1 + - libxslt1-dev + - libxslt1.1 + - libffi-dev + - ntp # Clock synchronization + - postfix # Mail Server + - python3-dev # Installing python developer suite + - python-tk + - screen # To aid ssh sessions with connectivity problems + - vim # Is that supposed to be a question!? + - xfonts-75dpi + - xfonts-base + - zlib1g-dev + - apt-transport-https + - libsasl2-dev + - libldap2-dev + - libcups2-dev + - pv # Show progress during database restore + state: present + force: yes + +- include_tasks: debian.yml + when: ansible_distribution == 'Debian' + +- include_tasks: ubuntu.yml + when: ansible_distribution == 'Ubuntu' + +... 
diff --git a/bench/playbooks/roles/common/tasks/macos.yml b/bench/playbooks/roles/common/tasks/macos.yml new file mode 100644 index 0000000..5cb77d5 --- /dev/null +++ b/bench/playbooks/roles/common/tasks/macos.yml @@ -0,0 +1,39 @@ +--- + +- hosts: localhost + become: yes + become_user: root + vars: + bench_repo_path: "/Users/{{ ansible_user_id }}/.bench" + bench_path: "/Users/{{ ansible_user_id }}/xhiveframework-bench" + tasks: + # install pre-requisites + - name: install prequisites + homebrew: + name: + - cmake + - redis + - mariadb + - nodejs + state: present + + # install wkhtmltopdf + - name: cask installs + homebrew_cask: + name: + - wkhtmltopdf + state: present + + - name: configure mariadb + include_tasks: roles/mariadb/tasks/main.yml + vars: + mysql_conf_tpl: roles/mariadb/files/mariadb_config.cnf + + # setup xhiveframework-bench + - include_tasks: includes/setup_bench.yml + + # setup development environment + - include_tasks: includes/setup_dev_env.yml + when: not production + +... diff --git a/bench/playbooks/roles/common/tasks/main.yml b/bench/playbooks/roles/common/tasks/main.yml new file mode 100644 index 0000000..b6b2be2 --- /dev/null +++ b/bench/playbooks/roles/common/tasks/main.yml @@ -0,0 +1,9 @@ +--- +# Install's prerequisites, like fonts, image libraries, vim, screen, python3-dev + +- include_tasks: debian_family.yml + when: ansible_os_family == 'Debian' + +- include_tasks: redhat_family.yml + when: ansible_os_family == "RedHat" +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/common/tasks/redhat_family.yml b/bench/playbooks/roles/common/tasks/redhat_family.yml new file mode 100644 index 0000000..ef172fe --- /dev/null +++ b/bench/playbooks/roles/common/tasks/redhat_family.yml @@ -0,0 +1,52 @@ +--- + +- name: Install IUS repo for python 3.6 + become: yes + become_user: root + yum: + name: https://repo.ius.io/ius-release-el7.rpm + state: present + +- name: "Setup prerequisites using yum" + become: yes + become_user: root + yum: + name: + - bzip2-devel + - cronie + - dnsmasq + - freetype-devel + - git + - htop + - lcms2-devel + - libjpeg-devel + - libtiff-devel + - libffi-devel + - libwebp-devel + - libXext + - libXrender + - libzip-devel + - libffi-devel + - ntp + - openssl-devel + - postfix + - python36u + - python-devel + - python-setuptools + - python-pip + - redis + - screen + - sudo + - tcl-devel + - tk-devel + - vim + - which + - xorg-x11-fonts-75dpi + - xorg-x11-fonts-Type1 + - zlib-devel + - openssl-devel + - openldap-devel + - libselinux-python + - cups-libs + state: present +... 
diff --git a/bench/playbooks/roles/common/tasks/ubuntu.yml b/bench/playbooks/roles/common/tasks/ubuntu.yml new file mode 100644 index 0000000..cdc5606 --- /dev/null +++ b/bench/playbooks/roles/common/tasks/ubuntu.yml @@ -0,0 +1,41 @@ +--- + +- name: install pillow prerequisites for Ubuntu < 14.04 + apt: + pkg: + - libjpeg8-dev + - libtiff4-dev + - tcl8.5-dev + - tk8.5-dev + state: present + force: yes + when: ansible_distribution_version is version_compare('14.04', 'lt') + +- name: install pillow prerequisites for Ubuntu >= 14.04 + apt: + pkg: + - libjpeg8-dev + - libtiff5-dev + - tcl8.6-dev + - tk8.6-dev + state: present + force: yes + when: ansible_distribution_version is version_compare('14.04', 'ge') + +- name: install pdf prerequisites for Ubuntu < 18.04 + apt: + pkg: + - libssl-dev + state: present + force: yes + when: ansible_distribution_version is version_compare('18.04', 'lt') + +- name: install pdf prerequisites for Ubuntu >= 18.04 + apt: + pkg: + - libssl1.1 + state: present + force: yes + when: ansible_distribution_version is version_compare('18.04', 'ge') + +... diff --git a/bench/playbooks/roles/dns_caching/handlers/main.yml b/bench/playbooks/roles/dns_caching/handlers/main.yml new file mode 100644 index 0000000..f3b1d4c --- /dev/null +++ b/bench/playbooks/roles/dns_caching/handlers/main.yml @@ -0,0 +1,4 @@ +--- +- name: restart network manager + service: name=NetworkManager state=restarted +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/dns_caching/tasks/main.yml b/bench/playbooks/roles/dns_caching/tasks/main.yml new file mode 100644 index 0000000..7c34f0f --- /dev/null +++ b/bench/playbooks/roles/dns_caching/tasks/main.yml @@ -0,0 +1,20 @@ +--- +- name: Check NetworkManager.conf exists + stat: + path: /etc/NetworkManager/NetworkManager.conf + register: result + +- name: Unmask NetworkManager service + command: systemctl unmask NetworkManager + when: result.stat.exists + +- name: Add dnsmasq to network config + lineinfile: > + dest=/etc/NetworkManager/NetworkManager.conf + regexp="dns=" + line="dns=dnsmasq" + state=present + when: result.stat.exists + notify: + - restart network manager +... diff --git a/bench/playbooks/roles/fail2ban/defaults/main.yml b/bench/playbooks/roles/fail2ban/defaults/main.yml new file mode 100644 index 0000000..8101907 --- /dev/null +++ b/bench/playbooks/roles/fail2ban/defaults/main.yml @@ -0,0 +1,5 @@ +--- +fail2ban_nginx_access_log: /var/log/nginx/*access.log +maxretry: 6 +bantime: 600 +findtime: 600 diff --git a/bench/playbooks/roles/fail2ban/handlers/main.yml b/bench/playbooks/roles/fail2ban/handlers/main.yml new file mode 100644 index 0000000..d675d4d --- /dev/null +++ b/bench/playbooks/roles/fail2ban/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart fail2ban + service: name=fail2ban state=restarted \ No newline at end of file diff --git a/bench/playbooks/roles/fail2ban/tasks/configure_nginx_jail.yml b/bench/playbooks/roles/fail2ban/tasks/configure_nginx_jail.yml new file mode 100644 index 0000000..b9ced99 --- /dev/null +++ b/bench/playbooks/roles/fail2ban/tasks/configure_nginx_jail.yml @@ -0,0 +1,14 @@ +- name: Configure fail2ban jail options + hosts: localhost + become: yes + become_user: root + vars_files: + - ../defaults/main.yml + tasks: + + - name: Setup filter + template: src="../templates/nginx-proxy-filter.conf.j2" dest="/etc/fail2ban/filter.d/nginx-proxy.conf" + - name: Setup jail + 
template: src="../templates/nginx-proxy-jail.conf.j2" dest="/etc/fail2ban/jail.d/nginx-proxy.conf" + - name: restart service + service: name=fail2ban state=restarted diff --git a/bench/playbooks/roles/fail2ban/tasks/main.yml b/bench/playbooks/roles/fail2ban/tasks/main.yml new file mode 100644 index 0000000..d786603 --- /dev/null +++ b/bench/playbooks/roles/fail2ban/tasks/main.yml @@ -0,0 +1,28 @@ +--- +- name: Install fail2ban + yum: name=fail2ban state=present + when: ansible_distribution == 'CentOS' + +- name: Install fail2ban + apt: name=fail2ban state=present + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + +- name: Enable fail2ban + service: name=fail2ban enabled=yes + +- name: Create jail.d + file: path=/etc/fail2ban/jail.d state=directory + +- name: Setup filters + template: src="{{item}}-filter.conf.j2" dest="/etc/fail2ban/filter.d/{{item}}.conf" + with_items: + - nginx-proxy + notify: + - restart fail2ban + +- name: setup jails + template: src="{{item}}-jail.conf.j2" dest="/etc/fail2ban/jail.d/{{item}}.conf" + with_items: + - nginx-proxy + notify: + - restart fail2ban diff --git a/bench/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 b/bench/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 new file mode 100644 index 0000000..27f74cd --- /dev/null +++ b/bench/playbooks/roles/fail2ban/templates/nginx-proxy-filter.conf.j2 @@ -0,0 +1,10 @@ +# Block IPs trying to use server as proxy. 
+[Definition] +failregex = .*\" 400 + .*"[A-Z]* /(cms|muieblackcat|db|cpcommerce|cgi-bin|wp-login|joomla|awstatstotals|wp-content|wp-includes|pma|phpmyadmin|myadmin|mysql|mysqladmin|sqladmin|mypma|admin|xampp|mysqldb|pmadb|phpmyadmin1|phpmyadmin2).*" 4[\d][\d] + .*".*supports_implicit_sdk_logging.*" 4[\d][\d] + .*".*activities?advertiser_tracking_enabled.*" 4[\d][\d] + .*".*/picture?type=normal.*" 4[\d][\d] + .*".*/announce.php?info_hash=.*" 4[\d][\d] + +ignoreregex = \ No newline at end of file diff --git a/bench/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 b/bench/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 new file mode 100644 index 0000000..c351773 --- /dev/null +++ b/bench/playbooks/roles/fail2ban/templates/nginx-proxy-jail.conf.j2 @@ -0,0 +1,9 @@ +## block hosts trying to abuse our server as a forward proxy +[nginx-proxy] +enabled = true +filter = nginx-proxy +logpath = {{ fail2ban_nginx_access_log }} +action = iptables-multiport[name=NoNginxProxy, port="http,https"] +maxretry = {{ maxretry }} +bantime = {{ bantime }} +findtime = {{ findtime }} \ No newline at end of file diff --git a/bench/playbooks/roles/frappe_selinux/files/frappe_selinux.te b/bench/playbooks/roles/frappe_selinux/files/frappe_selinux.te new file mode 100644 index 0000000..63481e3 --- /dev/null +++ b/bench/playbooks/roles/frappe_selinux/files/frappe_selinux.te @@ -0,0 +1,32 @@ +module xhiveframework_selinux 1.0; + +require { + type user_home_dir_t; + type httpd_t; + type user_home_t; + type soundd_port_t; + class tcp_socket name_connect; + class lnk_file read; + class dir { getattr search }; + class file { read open }; +} + +#============= httpd_t ============== + +#!!!! This avc is allowed in the current policy +allow httpd_t soundd_port_t:tcp_socket name_connect; + +#!!!! This avc is allowed in the current policy +allow httpd_t user_home_dir_t:dir search; + +#!!!! 
This avc is allowed in the current policy +allow httpd_t user_home_t:dir { getattr search }; + +#!!!! This avc can be allowed using the boolean 'httpd_read_user_content' +allow httpd_t user_home_t:file open; + +#!!!! This avc is allowed in the current policy +allow httpd_t user_home_t:file read; + +#!!!! This avc is allowed in the current policy +allow httpd_t user_home_t:lnk_file read; diff --git a/bench/playbooks/roles/frappe_selinux/tasks/main.yml b/bench/playbooks/roles/frappe_selinux/tasks/main.yml new file mode 100644 index 0000000..4c8a7cb --- /dev/null +++ b/bench/playbooks/roles/frappe_selinux/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Install deps + yum: + name: + - policycoreutils-python + - selinux-policy-devel + state: present + when: ansible_distribution == 'CentOS' + +- name: Check enabled SELinux modules + shell: semanage module -l + register: enabled_modules + when: ansible_distribution == 'CentOS' + +- name: Copy xhiveframework_selinux policy + copy: src=xhiveframework_selinux.te dest=/root/xhiveframework_selinux.te + register: dest_xhiveframework_selinux_te + when: ansible_distribution == 'CentOS' + +- name: Compile xhiveframework_selinux policy + shell: "make -f /usr/share/selinux/devel/Makefile xhiveframework_selinux.pp && semodule -i xhiveframework_selinux.pp" + args: + chdir: /root/ + when: "ansible_distribution == 'CentOS' and enabled_modules.stdout.find('xhiveframework_selinux') == -1 or dest_xhiveframework_selinux_te.changed" +... diff --git a/bench/playbooks/roles/locale/defaults/main.yml b/bench/playbooks/roles/locale/defaults/main.yml new file mode 100644 index 0000000..82343e3 --- /dev/null +++ b/bench/playbooks/roles/locale/defaults/main.yml @@ -0,0 +1,4 @@ +--- +locale_keymap: us +locale_lang: en_US.utf8 +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/locale/tasks/main.yml b/bench/playbooks/roles/locale/tasks/main.yml new file mode 100644 index 0000000..8551f69 --- /dev/null +++ b/bench/playbooks/roles/locale/tasks/main.yml @@ -0,0 +1,21 @@ +--- +- name: Check current locale + shell: localectl + register: locale_test + when: ansible_distribution == 'Centos' or ansible_distribution == 'Ubuntu' + +- name: Set Locale + command: "localectl set-locale LANG={{ locale_lang }}" + when: (ansible_distribution == 'Centos' or ansible_distribution == 'Ubuntu') and locale_test.stdout.find('LANG=locale_lang') == -1 + +- name: Set keymap + command: "localectl set-keymap {{ locale_keymap }}" + when: "(ansible_distribution == 'Centos' or ansible_distribution == 'Ubuntu') and locale_test.stdout.find('Keymap:locale_keymap') == -1" + +- name: Set Locale as en_US + lineinfile: dest=/etc/environment backup=yes line="{{ item }}" + with_items: + - "LC_ALL=en_US.UTF-8" + - "LC_CTYPE=en_US.UTF-8" + - "LANG=en_US.UTF-8" +... \ No newline at end of file diff --git a/bench/playbooks/roles/logwatch/defaults/main.yml b/bench/playbooks/roles/logwatch/defaults/main.yml new file mode 100644 index 0000000..fd3675e --- /dev/null +++ b/bench/playbooks/roles/logwatch/defaults/main.yml @@ -0,0 +1,4 @@ +--- +logwatch_emails: "{{ admin_emails }}" +logwatch_detail: High +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/logwatch/tasks/main.yml b/bench/playbooks/roles/logwatch/tasks/main.yml new file mode 100644 index 0000000..2450ac7 --- /dev/null +++ b/bench/playbooks/roles/logwatch/tasks/main.yml @@ -0,0 +1,13 @@ +--- +- name: Install logwatch + yum: name=logwatch state=present + when: ansible_distribution == 'CentOS' + +- name: Install logwatch on Ubuntu or Debian + apt: name=logwatch state=present + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + +- name: Copy logwatch config + template: src=logwatch.conf.j2 dest=/etc/logwatch/conf/logwatch.conf backup=yes + when: admin_emails is defined +... \ No newline at end of file diff --git a/bench/playbooks/roles/logwatch/templates/logwatch.conf.j2 b/bench/playbooks/roles/logwatch/templates/logwatch.conf.j2 new file mode 100644 index 0000000..a5c45cf --- /dev/null +++ b/bench/playbooks/roles/logwatch/templates/logwatch.conf.j2 @@ -0,0 +1,2 @@ +MailTo = {{ logwatch_emails }} +Detail = {{ logwatch_detail }} \ No newline at end of file diff --git a/bench/playbooks/roles/mariadb/README.md b/bench/playbooks/roles/mariadb/README.md new file mode 100644 index 0000000..ad86919 --- /dev/null +++ b/bench/playbooks/roles/mariadb/README.md @@ -0,0 +1,63 @@ +# Ansible Role: MariaDB + +Installs MariaDB + +## Supported platforms + +``` +CentOS 6 & 7 +Ubuntu 14.04 +Ubuntu 16.04 +Debian 9 +``` + +## Post install + +Run `mariadb-secure-installation` + +## Requirements + +None + +## Role Variables + +MariaDB version: + +``` +mariadb_version: 10.2 +``` + +Configuration template: + +``` +mysql_conf_tpl: change_me +``` + +Configuration filename: + +``` +mysql_conf_file: settings.cnf +``` + +### Experimental unattended mariadb-secure-installation + +``` +ansible-playbook release.yml --extra-vars "mysql_secure_installation=true mysql_root_password=your_very_secret_password" +``` + +## Dependencies + +None + +## Example Playbook + +``` +- hosts: servers + roles: + - { 
role: mariadb } +``` + +## Credits + +- [Attila van der Velde](https://github.com/vdvm) + diff --git a/bench/playbooks/roles/mariadb/defaults/main.yml b/bench/playbooks/roles/mariadb/defaults/main.yml new file mode 100644 index 0000000..dc98044 --- /dev/null +++ b/bench/playbooks/roles/mariadb/defaults/main.yml @@ -0,0 +1,5 @@ +--- +mysql_conf_tpl: change_me +mysql_conf_file: settings.cnf + +mysql_secure_installation: false diff --git a/bench/playbooks/roles/mariadb/files/debian_mariadb_config.cnf b/bench/playbooks/roles/mariadb/files/debian_mariadb_config.cnf new file mode 100644 index 0000000..f2ac13f --- /dev/null +++ b/bench/playbooks/roles/mariadb/files/debian_mariadb_config.cnf @@ -0,0 +1,14 @@ +[mysqld] +innodb-file-format=barracuda +innodb-file-per-table=1 +innodb-large-prefix=1 +character-set-client-handshake = FALSE +character-set-server = utf8mb4 +collation-server = utf8mb4_unicode_ci +max_allowed_packet=256M + +[mysql] +default-character-set = utf8mb4 + +[mysqldump] +max_allowed_packet=256M diff --git a/bench/playbooks/roles/mariadb/files/mariadb_config.cnf b/bench/playbooks/roles/mariadb/files/mariadb_config.cnf new file mode 100644 index 0000000..1944345 --- /dev/null +++ b/bench/playbooks/roles/mariadb/files/mariadb_config.cnf @@ -0,0 +1,64 @@ +[mysqld] + +# GENERAL # +user = mysql +default-storage-engine = InnoDB +socket = /var/lib/mysql/mysql.sock +pid-file = /var/lib/mysql/mysql.pid + +# MyISAM # +key-buffer-size = 32M +myisam-recover = FORCE,BACKUP + +# SAFETY # +max-allowed-packet = 256M +max-connect-errors = 1000000 +innodb = FORCE + +# DATA STORAGE # +datadir = /var/lib/mysql/ + +# BINARY LOGGING # +log-bin = /var/lib/mysql/mysql-bin +expire-logs-days = 14 +sync-binlog = 1 + +# REPLICATION # +server-id = 1 + +# CACHES AND LIMITS # +tmp-table-size = 32M +max-heap-table-size = 32M +query-cache-type = 0 +query-cache-size = 0 +max-connections = 500 +thread-cache-size = 50 +open-files-limit = 65535 +table-definition-cache = 4096 +table-open-cache = 
10240 + +# INNODB # +innodb-flush-method = O_DIRECT +innodb-log-files-in-group = 2 +innodb-log-file-size = 512M +innodb-flush-log-at-trx-commit = 1 +innodb-file-per-table = 1 +innodb-buffer-pool-size = {{ (ansible_memtotal_mb*0.685)|round|int }}M +innodb-file-format = barracuda +innodb-large-prefix = 1 +collation-server = utf8mb4_unicode_ci +character-set-server = utf8mb4 +character-set-client-handshake = FALSE +max_allowed_packet = 256M + +# LOGGING # +log-error = /var/lib/mysql/mysql-error.log +log-queries-not-using-indexes = 0 +slow-query-log = 1 +slow-query-log-file = /var/lib/mysql/mysql-slow.log + +[mysql] +default-character-set = utf8mb4 + +[mysqldump] +max_allowed_packet=256M \ No newline at end of file diff --git a/bench/playbooks/roles/mariadb/handlers/main.yml b/bench/playbooks/roles/mariadb/handlers/main.yml new file mode 100644 index 0000000..6f737d9 --- /dev/null +++ b/bench/playbooks/roles/mariadb/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart mariadb + service: name=mariadb state=restarted diff --git a/bench/playbooks/roles/mariadb/tasks/centos.yml b/bench/playbooks/roles/mariadb/tasks/centos.yml new file mode 100644 index 0000000..7dbe69d --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/centos.yml @@ -0,0 +1,18 @@ +--- +- name: Add repo file + template: src=mariadb_centos.repo.j2 dest=/etc/yum.repos.d/mariadb.repo owner=root group=root mode=0644 + +- name: Install MariaDB + yum: + name: + - MariaDB-server + - MariaDB-client + enablerepo: mariadb + state: present + +- name: Install MySQLdb Python package for secure installations. 
+ yum: + name: + - MySQL-python + state: present + when: mysql_secure_installation and mysql_root_password is defined diff --git a/bench/playbooks/roles/mariadb/tasks/debian.yml b/bench/playbooks/roles/mariadb/tasks/debian.yml new file mode 100644 index 0000000..968aa39 --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/debian.yml @@ -0,0 +1,39 @@ +--- +- name: Add apt key for mariadb for Debian <= 8 + apt_key: keyserver=hkp://keyserver.ubuntu.com:80 id=0xcbcb082a1bb943db state=present + when: ansible_distribution_major_version is version_compare('8', 'le') + +- name: Install dirmngr for apt key for mariadb for Debian > 8 + apt: + pkg: dirmngr + state: present + when: ansible_distribution_major_version is version_compare('8', 'gt') + +- name: Add apt key for mariadb for Debian > 8 + apt_key: keyserver=hkp://keyserver.ubuntu.com:80 id=0xF1656F24C74CD1D8 state=present + when: ansible_distribution_major_version is version_compare('8', 'gt') + +- name: Add apt repository + apt_repository: + repo: 'deb [arch=amd64,i386] http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/debian {{ ansible_distribution_release }} main' + state: present + +- name: Add apt repository + apt_repository: + repo: 'deb-src [arch=amd64,i386] http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/debian {{ ansible_distribution_release }} main' + state: present + +- name: Unattended package installation + shell: export DEBIAN_FRONTEND=noninteractive + +- name: apt-get install + apt: + pkg: + - mariadb-server + - mariadb-client + - mariadb-common + - libmariadbclient18 + - python3-mysqldb + update_cache: yes + state: present +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/mariadb/tasks/main.yml b/bench/playbooks/roles/mariadb/tasks/main.yml new file mode 100644 index 0000000..4d158a7 --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/main.yml @@ -0,0 +1,76 @@ +--- +- include: centos.yml + when: ansible_distribution == 'CentOS' and ansible_distribution_major_version|int >= 6 + +- include: ubuntu-trusty.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_version == '14.04' + +- include: ubuntu-xenial_bionic.yml + when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version|int >= 16 + +- name: Add configuration + template: + src: '{{ mysql_conf_tpl }}' + dest: '{{ mysql_conf_dir[ansible_distribution] }}/{{ mysql_conf_file }}' + owner: root + group: root + mode: 0644 + when: mysql_conf_tpl != 'change_me' and ansible_distribution != 'Debian' + notify: restart mariadb + +- include_tasks: debian.yml + when: ansible_distribution == 'Debian' + +- name: Add configuration + template: + src: '{{ mysql_conf_tpl }}' + dest: '{{ mysql_conf_dir[ansible_distribution] }}/{{ mysql_conf_file }}' + owner: root + group: root + mode: 0644 + when: mysql_conf_tpl != 'change_me' and ansible_distribution == 'Debian' + notify: restart mariadb + +- name: Add additional conf for MariaDB 10.2 in mariadb.conf.d + blockinfile: + path: /etc/mysql/conf.d/settings.cnf + block: | + # Import all .cnf files from configuration directory + !includedir /etc/mysql/mariadb.conf.d/ + become: yes + become_user: root + when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' + +- name: Add additional conf for MariaDB 10.2 in mariadb.conf.d + blockinfile: + path: /etc/mysql/mariadb.conf.d/xhiveerp.cnf + block: | + [mysqld] + pid-file = /var/run/mysqld/mysqld.pid + socket = /var/run/mysqld/mysqld.sock + + # setting appeared inside mysql but overwritten by mariadb inside mariadb.conf.d/xx-server.cnf valued as utf8mb4_general_ci + + collation-server = 
utf8mb4_unicode_ci + create: yes + become: yes + become_user: root + when: ansible_distribution == 'Ubuntu' or ansible_distribution == 'Debian' + +- name: Start and enable service + service: + name: mariadb + state: started + enabled: yes + +- debug: + msg: "{{ mysql_root_password }}" + +- include_tasks: mysql_secure_installation.yml + when: mysql_root_password is defined + +- debug: + var: mysql_secure_installation + when: mysql_secure_installation and mysql_root_password is defined + +... diff --git a/bench/playbooks/roles/mariadb/tasks/mysql_secure_installation.yml b/bench/playbooks/roles/mariadb/tasks/mysql_secure_installation.yml new file mode 100644 index 0000000..0329187 --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/mysql_secure_installation.yml @@ -0,0 +1,55 @@ +--- + +- debug: + msg: "{{ mysql_root_password }}" + +# create root .my.cnf config file +- name: Add .my.cnf + template: src=my.cnf.j2 dest=/root/.my.cnf owner=root group=root mode=0600 + +# Set root password +# UPDATE mysql.user SET Password=PASSWORD('mysecret') WHERE User='root'; +# FLUSH PRIVILEGES; + +- name: Set root Password + mysql_user: login_password={{ mysql_root_password }} check_implicit_admin=yes name=root host={{ item }} password={{ mysql_root_password }} state=present + with_items: + - localhost + - 127.0.0.1 + - ::1 + +- name: Reload privilege tables + command: 'mariadb -ne "{{ item }}"' + with_items: + - FLUSH PRIVILEGES + changed_when: False + when: run_travis is not defined + +- name: Remove anonymous users + command: 'mariadb -ne "{{ item }}"' + with_items: + - DELETE FROM mysql.user WHERE User='' + changed_when: False + when: run_travis is not defined + +- name: Disallow root login remotely + command: 'mariadb -ne "{{ item }}"' + with_items: + - DELETE FROM mysql.user WHERE User='root' AND Host NOT IN ('localhost', '127.0.0.1', '::1') + changed_when: False + when: run_travis is not defined + +- name: Remove test database and access to it + command: 'mariadb -ne "{{ 
item }}"' + with_items: + - DROP DATABASE IF EXISTS test + - DELETE FROM mysql.db WHERE Db='test' OR Db='test\\_%' + changed_when: False + when: run_travis is not defined + +- name: Reload privilege tables + command: 'mariadb -ne "{{ item }}"' + with_items: + - FLUSH PRIVILEGES + changed_when: False + when: run_travis is not defined diff --git a/bench/playbooks/roles/mariadb/tasks/ubuntu-trusty.yml b/bench/playbooks/roles/mariadb/tasks/ubuntu-trusty.yml new file mode 100644 index 0000000..45839b4 --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/ubuntu-trusty.yml @@ -0,0 +1,31 @@ +--- +- name: Add repo file + template: src=mariadb_ubuntu.list.j2 dest=/etc/apt/sources.list.d/mariadb.list owner=root group=root mode=0644 + register: mariadb_list + +- name: Add repo key + apt_key: id=1BB943DB url=http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xCBCB082A1BB943DB state=present + register: mariadb_key + +- name: Update apt cache + apt: update_cache=yes + when: mariadb_list.changed == True or mariadb_key.changed == True + +- name: Unattended package installation + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Install MariaDB + apt: + pkg: + - mariadb-server + - mariadb-client + - libmariadbclient18 + state: present + +- name: Install MySQLdb Python package for secure installations. 
+ apt: + pkg: + - python3-mysqldb + state: present + when: mysql_secure_installation and mysql_root_password is defined diff --git a/bench/playbooks/roles/mariadb/tasks/ubuntu-xenial_bionic.yml b/bench/playbooks/roles/mariadb/tasks/ubuntu-xenial_bionic.yml new file mode 100644 index 0000000..a96dff1 --- /dev/null +++ b/bench/playbooks/roles/mariadb/tasks/ubuntu-xenial_bionic.yml @@ -0,0 +1,31 @@ +--- +- name: Add repo file + template: src=mariadb_ubuntu.list.j2 dest=/etc/apt/sources.list.d/mariadb.list owner=root group=root mode=0644 + register: mariadb_list + +- name: Add repo key + apt_key: id=C74CD1D8 url=http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xF1656F24C74CD1D8 state=present + register: mariadb_key + +- name: Update apt cache + apt: update_cache=yes + when: mariadb_list.changed == True or mariadb_key.changed == True + +- name: Unattended package installation + shell: export DEBIAN_FRONTEND=noninteractive + changed_when: false + +- name: Install MariaDB + apt: + pkg: + - mariadb-server + - mariadb-client + - libmariadbclient18 + state: present + +- name: Install MySQLdb Python package for secure installations. 
+ apt: + pkg: + - python3-mysqldb + state: present + when: mysql_secure_installation and mysql_root_password is defined diff --git a/bench/playbooks/roles/mariadb/templates/mariadb_centos.repo.j2 b/bench/playbooks/roles/mariadb/templates/mariadb_centos.repo.j2 new file mode 100644 index 0000000..64738cc --- /dev/null +++ b/bench/playbooks/roles/mariadb/templates/mariadb_centos.repo.j2 @@ -0,0 +1,7 @@ +# MariaDB CentOS {{ ansible_distribution_major_version|int }} repository list +# http://mariadb.org/mariadb/repositories/ +[mariadb] +name = MariaDB +baseurl = http://yum.mariadb.org/{{ mariadb_version }}/centos{{ ansible_distribution_major_version|int }}-amd64 +gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB +gpgcheck=1 diff --git a/bench/playbooks/roles/mariadb/templates/mariadb_debian.list.j2 b/bench/playbooks/roles/mariadb/templates/mariadb_debian.list.j2 new file mode 100644 index 0000000..710566f --- /dev/null +++ b/bench/playbooks/roles/mariadb/templates/mariadb_debian.list.j2 @@ -0,0 +1,4 @@ +# MariaDB {{ mariadb_version }} Debian {{ ansible_distribution_release | title }} repository list +# http://mariadb.org/mariadb/repositories/ +deb http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/debian {{ ansible_distribution_release | lower }} main +deb-src http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/debian {{ ansible_distribution_release | lower }} main diff --git a/bench/playbooks/roles/mariadb/templates/mariadb_ubuntu.list.j2 b/bench/playbooks/roles/mariadb/templates/mariadb_ubuntu.list.j2 new file mode 100644 index 0000000..981b4d6 --- /dev/null +++ b/bench/playbooks/roles/mariadb/templates/mariadb_ubuntu.list.j2 @@ -0,0 +1,4 @@ +# MariaDB Ubuntu {{ ansible_distribution_release | title }} repository list +# http://mariadb.org/mariadb/repositories/ +deb http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/ubuntu {{ ansible_distribution_release | lower }} main +deb-src 
http://ams2.mirrors.digitalocean.com/mariadb/repo/{{ mariadb_version }}/ubuntu {{ ansible_distribution_release | lower }} main diff --git a/bench/playbooks/roles/mariadb/templates/my.cnf.j2 b/bench/playbooks/roles/mariadb/templates/my.cnf.j2 new file mode 100644 index 0000000..b63b4e6 --- /dev/null +++ b/bench/playbooks/roles/mariadb/templates/my.cnf.j2 @@ -0,0 +1,3 @@ +[client] +user=root +password={{ mysql_root_password }} diff --git a/bench/playbooks/roles/mariadb/vars/main.yml b/bench/playbooks/roles/mariadb/vars/main.yml new file mode 100644 index 0000000..df81b3b --- /dev/null +++ b/bench/playbooks/roles/mariadb/vars/main.yml @@ -0,0 +1,8 @@ +--- +mysql_conf_dir: + "CentOS": /etc/my.cnf.d + "Ubuntu": /etc/mysql/conf.d + "Debian": /etc/mysql/conf.d +mysql_conf_tpl: files/mariadb_config.cnf +mysql_secure_installation: True +... diff --git a/bench/playbooks/roles/nginx/README.md b/bench/playbooks/roles/nginx/README.md new file mode 100644 index 0000000..00bfb8a --- /dev/null +++ b/bench/playbooks/roles/nginx/README.md @@ -0,0 +1,82 @@ +# Ansible Role: Nginx + +[![Build Status](https://travis-ci.org/geerlingguy/ansible-role-nginx.svg?branch=master)](https://travis-ci.org/geerlingguy/ansible-role-nginx) + +Installs Nginx on RedHat/CentOS or Debian/Ubuntu linux servers. + +This role installs and configures the latest version of Nginx from the Nginx yum repository (on RedHat-based systems) or via apt (on Debian-based systems). You will likely need to do extra setup work after this role has installed Nginx, like adding your own [virtualhost].conf file inside `/etc/nginx/conf.d/`, describing the location and options to use for your particular website. + +## Requirements + +None. + +## Role Variables + +Available variables are listed below, along with default values (see `defaults/main.yml`): + + nginx_vhosts: [] + +A list of vhost definitions (server blocks) for Nginx virtual hosts. If left empty, you will need to supply your own virtual host configuration. 
See the commented example in `defaults/main.yml` for available server options. If you have a large number of customizations required for your server definition(s), you're likely better off managing the vhost configuration file yourself, leaving this variable set to `[]`. + + nginx_remove_default_vhost: false + +Whether to remove the 'default' virtualhost configuration supplied by Nginx. Useful if you want the base `/` URL to be directed at one of your own virtual hosts configured in a separate .conf file. + + nginx_upstreams: [] + +If you are configuring Nginx as a load balancer, you can define one or more upstream sets using this variable. In addition to defining at least one upstream, you would need to configure one of your server blocks to proxy requests through the defined upstream (e.g. `proxy_pass http://myapp1;`). See the commented example in `defaults/main.yml` for more information. + + nginx_user: "nginx" + +The user under which Nginx will run. Defaults to `nginx` for RedHat, and `www-data` for Debian. + + nginx_worker_processes: "1" + nginx_worker_connections: "1024" + +`nginx_worker_processes` should be set to the number of cores present on your machine (find this number with `grep processor /proc/cpuinfo | wc -l`). `nginx_worker_connections` is the number of connections per process. Set this higher to handle more simultaneous connections (and remember that a connection will be used for as long as the keepalive timeout duration for every client!). + + nginx_error_log: "/var/log/nginx/error.log warn" + nginx_access_log: "/var/log/nginx/access.log main buffer=16k" + +Configuration of the default error and access logs. Set to `off` to disable a log entirely. + + nginx_sendfile: "on" + nginx_tcp_nopush: "on" + nginx_tcp_nodelay: "on" + +TCP connection options. See [this blog post](https://t37.net/nginx-optimization-understanding-sendfile-tcp_nodelay-and-tcp_nopush.html) for more information on these directives. 
+ + nginx_keepalive_timeout: "65" + nginx_keepalive_requests: "100" + +Nginx keepalive settings. Timeout should be set higher (10s+) if you have more polling-style traffic (AJAX-powered sites especially), or lower (<10s) if you have a site where most users visit a few pages and don't send any further requests. + + nginx_client_max_body_size: "64m" + +This value determines the largest file upload possible, as uploads are passed through Nginx before hitting a backend like `php-fpm`. If you get an error like `client intended to send too large body`, it means this value is set too low. + + nginx_proxy_cache_path: "" + +Set as the `proxy_cache_path` directive in the `nginx.conf` file. By default, this will not be configured (if left as an empty string), but if you wish to use Nginx as a reverse proxy, you can set this to a valid value (e.g. `"/var/cache/nginx keys_zone=cache:32m"`) to use Nginx's cache (further proxy configuration can be done in individual server configurations). + + nginx_default_release: "" + +(For Debian/Ubuntu only) Allows you to set a different repository for the installation of Nginx. As an example, if you are running Debian's wheezy release, and want to get a newer version of Nginx, you can install the `wheezy-backports` repository and set that value here, and Ansible will use that as the `-t` option while installing Nginx. + +## Dependencies + +None. + +## Example Playbook + + - hosts: server + roles: + - { role: geerlingguy.nginx } + +## License + +MIT / BSD + +## Author Information + +This role was created in 2014 by [Jeff Geerling](http://jeffgeerling.com/), author of [Ansible for DevOps](http://ansiblefordevops.com/). diff --git a/bench/playbooks/roles/nginx/defaults/main.yml b/bench/playbooks/roles/nginx/defaults/main.yml new file mode 100644 index 0000000..07eee90 --- /dev/null +++ b/bench/playbooks/roles/nginx/defaults/main.yml @@ -0,0 +1,48 @@ +--- +# Used only for Debian/Ubuntu installation, as the -t option for apt. 
+nginx_default_release: "" + +nginx_worker_processes: "1" +nginx_worker_connections: "1024" + +nginx_error_log: "/var/log/nginx/error.log warn" +nginx_access_log: "/var/log/nginx/access.log main buffer=16k" + +nginx_sendfile: "on" +nginx_tcp_nopush: "on" +nginx_tcp_nodelay: "on" + +nginx_keepalive_timeout: "65" +nginx_keepalive_requests: "100" + +nginx_client_max_body_size: "64m" + +nginx_proxy_cache_path: "" + +nginx_remove_default_vhost: false +nginx_vhosts: [] +# Example vhost below, showing all available options: +# - { +# listen: "80 default_server", # default: "80 default_server" +# server_name: "example.com", # default: N/A +# root: "/var/www/example.com", # default: N/A +# index: "index.html index.htm", # default: "index.html index.htm" +# +# # Properties that are only added if defined: +# error_page: "", +# access_log: "", +# extra_config: "" # Can be used to add extra config blocks (multiline). +# } + +nginx_upstreams: [] +# - { +# name: myapp1, +# strategy: "ip_hash", # "least_conn", etc. +# servers: { +# "srv1.example.com", +# "srv2.example.com weight=3", +# "srv3.example.com" +# } +# } +nginx_conf_file: nginx.conf.j2 +setup_www_redirect: false \ No newline at end of file diff --git a/bench/playbooks/roles/nginx/handlers/main.yml b/bench/playbooks/roles/nginx/handlers/main.yml new file mode 100644 index 0000000..92971d2 --- /dev/null +++ b/bench/playbooks/roles/nginx/handlers/main.yml @@ -0,0 +1,3 @@ +--- +- name: restart nginx + service: name=nginx state=restarted diff --git a/bench/playbooks/roles/nginx/meta/main.yml b/bench/playbooks/roles/nginx/meta/main.yml new file mode 100644 index 0000000..efbe68f --- /dev/null +++ b/bench/playbooks/roles/nginx/meta/main.yml @@ -0,0 +1,23 @@ +--- +dependencies: [] + +galaxy_info: + author: geerlingguy + description: Nginx installation for Linux/UNIX. 
+ company: "Midwestern Mac, LLC" + license: "license (BSD, MIT)" + min_ansible_version: 1.4 + platforms: + - name: EL + versions: + - 6 + - 7 + - name: Debian + versions: + - all + - name: Ubuntu + versions: + - all + categories: + - development + - web diff --git a/bench/playbooks/roles/nginx/tasks/main.yml b/bench/playbooks/roles/nginx/tasks/main.yml new file mode 100644 index 0000000..8786b34 --- /dev/null +++ b/bench/playbooks/roles/nginx/tasks/main.yml @@ -0,0 +1,51 @@ +--- +# Variable setup. +- name: Include OS-specific variables. + include_vars: "{{ ansible_os_family }}.yml" + +- name: Define nginx_user. + set_fact: + nginx_user: "{{ __nginx_user }}" + when: nginx_user is not defined + +# Setup/install tasks. +- include_tasks: setup-RedHat.yml + when: ansible_os_family == 'RedHat' + +- include_tasks: setup-Debian.yml + when: ansible_os_family == 'Debian' + +# Replace default nginx config with nginx template +- name: Rename default nginx.conf to nginx.conf.old + command: mv /etc/nginx/nginx.conf /etc/nginx/nginx.conf.old + when: ansible_os_family == 'Debian' + +# Nginx setup. +- name: Copy nginx configuration in place. + template: + src: "{{ nginx_conf_file }}" + dest: /etc/nginx/nginx.conf + owner: root + group: root + mode: 0644 + notify: restart nginx + +- name: Setup www redirect + template: + src: ../files/www_redirect.conf + dest: /etc/nginx/conf.d/ + owner: root + group: root + mode: 0644 + notify: restart nginx + when: setup_www_redirect + +- name: Enable SELinux + selinux: policy=targeted state=permissive + when: ansible_distribution == 'CentOS' + +- name: Ensure nginx is started and enabled to start at boot. + service: name=nginx state=started enabled=yes + +- include_tasks: vhosts.yml +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/nginx/tasks/setup-Debian.yml b/bench/playbooks/roles/nginx/tasks/setup-Debian.yml new file mode 100644 index 0000000..fd9ef24 --- /dev/null +++ b/bench/playbooks/roles/nginx/tasks/setup-Debian.yml @@ -0,0 +1,18 @@ +--- +- name: Add nginx apt repository key for Debian < 8 + apt_key: + url: http://nginx.org/keys/nginx_signing.key + state: present + when: ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('8', 'lt') + +- name: Add nginx apt repository for Debian < 8 + apt_repository: + repo: 'deb [arch=amd64,i386] http://nginx.org/packages/debian/ {{ ansible_distribution_release }} nginx' + state: present + when: ansible_distribution == 'Debian' and ansible_distribution_version is version_compare('8', 'lt') + +- name: Ensure nginx is installed. + apt: + pkg: nginx + state: present + default_release: "{{ nginx_default_release }}" diff --git a/bench/playbooks/roles/nginx/tasks/setup-RedHat.yml b/bench/playbooks/roles/nginx/tasks/setup-RedHat.yml new file mode 100644 index 0000000..73f205e --- /dev/null +++ b/bench/playbooks/roles/nginx/tasks/setup-RedHat.yml @@ -0,0 +1,11 @@ +--- +- name: Enable nginx repo. + template: + src: nginx.repo.j2 + dest: /etc/yum.repos.d/nginx.repo + owner: root + group: root + mode: 0644 + +- name: Ensure nginx is installed. + yum: pkg=nginx state=installed enablerepo=nginx diff --git a/bench/playbooks/roles/nginx/tasks/vhosts.yml b/bench/playbooks/roles/nginx/tasks/vhosts.yml new file mode 100644 index 0000000..5ee8ec2 --- /dev/null +++ b/bench/playbooks/roles/nginx/tasks/vhosts.yml @@ -0,0 +1,22 @@ +--- +- name: Remove default nginx vhost config file (if configured). + file: + path: "{{ nginx_default_vhost_path }}" + state: absent + when: nginx_remove_default_vhost + notify: restart nginx + +- name: Add managed vhost config file (if any vhosts are configured). 
+ template: + src: vhosts.j2 + dest: "{{ nginx_vhost_path }}/vhosts.conf" + mode: 0644 + when: nginx_vhosts + notify: restart nginx + +- name: Remove managed vhost config file (if no vhosts are configured). + file: + path: "{{ nginx_vhost_path }}/vhosts.conf" + state: absent + when: not nginx_vhosts + notify: restart nginx diff --git a/bench/playbooks/roles/nginx/templates/nginx.conf.j2 b/bench/playbooks/roles/nginx/templates/nginx.conf.j2 new file mode 100644 index 0000000..573e3ed --- /dev/null +++ b/bench/playbooks/roles/nginx/templates/nginx.conf.j2 @@ -0,0 +1,76 @@ +user {{ nginx_user }}; +worker_processes auto; +worker_rlimit_nofile 65535; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections {{ nginx_worker_connections or 2048 }}; + multi_accept on; +} + + +http { + include /etc/nginx/mime.types; + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + server_tokens off; + + # keepalive_timeout 10; + # keepalive_requests 10; + + gzip on; + gzip_disable "msie6"; + gzip_http_version 1.1; + gzip_comp_level 5; + gzip_min_length 256; + gzip_proxied any; + gzip_vary on; + gzip_types + application/atom+xml + application/javascript + application/json + application/rss+xml + application/vnd.ms-fontobject + application/x-font-ttf + application/font-woff + application/x-web-app-manifest+json + application/xhtml+xml + application/xml + font/opentype + image/svg+xml + image/x-icon + text/css + text/plain + text/x-component + ; + + server_names_hash_max_size 4096; + + open_file_cache max=65000 inactive=1m; + open_file_cache_valid 5s; + open_file_cache_min_uses 1; + open_file_cache_errors on; + + ssl_protocols SSLv3 TLSv1; + ssl_ciphers 
ECDHE-RSA-AES256-SHA384:AES256-SHA256:RC4:HIGH:!MD5:!aNULL:!EDH:!AESGCM; + ssl_prefer_server_ciphers on; + + client_max_body_size 50m; + large_client_header_buffers 4 32k; + + proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=web-cache:8m max_size=1000m inactive=600m; + + include /etc/nginx/conf.d/*.conf; +} diff --git a/bench/playbooks/roles/nginx/templates/nginx.repo.j2 b/bench/playbooks/roles/nginx/templates/nginx.repo.j2 new file mode 100644 index 0000000..9a853b7 --- /dev/null +++ b/bench/playbooks/roles/nginx/templates/nginx.repo.j2 @@ -0,0 +1,5 @@ +[nginx] +name=nginx repo +baseurl=http://nginx.org/packages/centos/{{ ansible_distribution_major_version }}/$basearch/ +gpgcheck=0 +enabled=1 diff --git a/bench/playbooks/roles/nginx/templates/vhosts.j2 b/bench/playbooks/roles/nginx/templates/vhosts.j2 new file mode 100644 index 0000000..09bda35 --- /dev/null +++ b/bench/playbooks/roles/nginx/templates/vhosts.j2 @@ -0,0 +1,24 @@ +{% for vhost in nginx_vhosts %} +server { + listen {{ vhost.listen | default('80 default_server') }}; + server_name {{ vhost.server_name }}; + + root {{ vhost.root }}; + index {{ vhost.index | default('index.html index.htm') }}; + + {% if vhost.error_page is defined %} + error_page {{ vhost.error_page }}; + {% endif %} + {% if vhost.access_log is defined %} + access_log {{ vhost.access_log }}; + {% endif %} + + {% if vhost.return is defined %} + return {{ vhost.return }}; + {% endif %} + + {% if vhost.extra_parameters is defined %} + {{ vhost.extra_parameters }}; + {% endif %} +} +{% endfor %} diff --git a/bench/playbooks/roles/nginx/tests/inventory b/bench/playbooks/roles/nginx/tests/inventory new file mode 100644 index 0000000..2fbb50c --- /dev/null +++ b/bench/playbooks/roles/nginx/tests/inventory @@ -0,0 +1 @@ +localhost diff --git a/bench/playbooks/roles/nginx/tests/test.yml b/bench/playbooks/roles/nginx/tests/test.yml new file mode 100644 index 0000000..42bba2c --- /dev/null +++ b/bench/playbooks/roles/nginx/tests/test.yml @@ 
-0,0 +1,5 @@ +--- +- hosts: localhost + remote_user: root + roles: + - ansible-role-nginx diff --git a/bench/playbooks/roles/nginx/vars/Debian.yml b/bench/playbooks/roles/nginx/vars/Debian.yml new file mode 100644 index 0000000..b78b7c5 --- /dev/null +++ b/bench/playbooks/roles/nginx/vars/Debian.yml @@ -0,0 +1,4 @@ +--- +nginx_vhost_path: /etc/nginx/sites-enabled +nginx_default_vhost_path: /etc/nginx/sites-enabled/default +__nginx_user: "www-data" diff --git a/bench/playbooks/roles/nginx/vars/RedHat.yml b/bench/playbooks/roles/nginx/vars/RedHat.yml new file mode 100644 index 0000000..2412304 --- /dev/null +++ b/bench/playbooks/roles/nginx/vars/RedHat.yml @@ -0,0 +1,4 @@ +--- +nginx_vhost_path: /etc/nginx/conf.d +nginx_default_vhost_path: /etc/nginx/conf.d/default.conf +__nginx_user: "nginx" diff --git a/bench/playbooks/roles/nodejs/defaults/main.yml b/bench/playbooks/roles/nodejs/defaults/main.yml new file mode 100644 index 0000000..33b3dfc --- /dev/null +++ b/bench/playbooks/roles/nodejs/defaults/main.yml @@ -0,0 +1,3 @@ +--- +node_version: 14 +... diff --git a/bench/playbooks/roles/nodejs/tasks/debian_family.yml b/bench/playbooks/roles/nodejs/tasks/debian_family.yml new file mode 100644 index 0000000..50026f4 --- /dev/null +++ b/bench/playbooks/roles/nodejs/tasks/debian_family.yml @@ -0,0 +1,12 @@ +--- +- name: 'Add Node.js PPA' + tags: 'nodejs' + become: 'yes' + become_method: 'sudo' + shell: "curl --silent --location https://deb.nodesource.com/setup_{{ node_version }}.x | bash -" + +- name: Install nodejs {{ node_version }} + package: + name: nodejs + state: present +... 
diff --git a/bench/playbooks/roles/nodejs/tasks/main.yml b/bench/playbooks/roles/nodejs/tasks/main.yml new file mode 100644 index 0000000..097d459 --- /dev/null +++ b/bench/playbooks/roles/nodejs/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- include_tasks: debian_family.yml + when: ansible_os_family == 'Debian' + +- include_tasks: redhat_family.yml + when: ansible_os_family == "RedHat" + +- name: Install yarn + command: npm install -g yarn + become: yes + become_user: root +... \ No newline at end of file diff --git a/bench/playbooks/roles/nodejs/tasks/redhat_family.yml b/bench/playbooks/roles/nodejs/tasks/redhat_family.yml new file mode 100644 index 0000000..af47854 --- /dev/null +++ b/bench/playbooks/roles/nodejs/tasks/redhat_family.yml @@ -0,0 +1,11 @@ +--- +- name: 'Add Node.js PPA' + tags: 'nodejs' + become: 'yes' + become_method: 'sudo' + shell: "curl --silent --location https://rpm.nodesource.com/setup_{{ node_version }}.x | sudo bash -" + +- name: Install node v{{ node_version }} + yum: name=nodejs state=present + when: ansible_os_family == 'RedHat' +... \ No newline at end of file diff --git a/bench/playbooks/roles/ntpd/tasks/main.yml b/bench/playbooks/roles/ntpd/tasks/main.yml new file mode 100644 index 0000000..110ab50 --- /dev/null +++ b/bench/playbooks/roles/ntpd/tasks/main.yml @@ -0,0 +1,25 @@ +--- +- name: Install ntpd + yum: + name: + - ntp + - ntpdate + state: present + when: ansible_distribution == 'CentOS' + +- name: Enable ntpd + service: name=ntpd enabled=yes state=started + when: ansible_distribution == 'CentOS' + +- name: Install ntpd + apt: + pkg: + - ntp + - ntpdate + state: present + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + +- name: Enable ntpd + service: name=ntp enabled=yes state=started + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/packer/tasks/debian_family.yml b/bench/playbooks/roles/packer/tasks/debian_family.yml new file mode 100644 index 0000000..6c3c149 --- /dev/null +++ b/bench/playbooks/roles/packer/tasks/debian_family.yml @@ -0,0 +1,8 @@ +--- +- name: Install unzip + apt: + pkg: + - unzip + update_cache: yes + state: present +... \ No newline at end of file diff --git a/bench/playbooks/roles/packer/tasks/main.yml b/bench/playbooks/roles/packer/tasks/main.yml new file mode 100644 index 0000000..5b99058 --- /dev/null +++ b/bench/playbooks/roles/packer/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Check if packer already exists + stat: + path: /opt/packer + register: packer + +- name: Check if packer version is 1.2.1 + command: /opt/packer --version + register: packer_version + when: packer.stat.exists + +- include_tasks: debian_family.yml + when: ansible_os_family == 'Debian' and packer.stat.exists == False + +- include_tasks: redhat_family.yml + when: ansible_os_family == "RedHat" and packer.stat.exists == False + +- name: Delete packer if < 1.2.1 + file: + state: absent + path: /opt/packer + when: (packer.stat.exists) and (packer_version is version_compare('1.2.1', '<')) + +- name: Download packer zip file + command: chdir=/opt/ wget https://releases.hashicorp.com/packer/1.2.1/packer_1.2.1_linux_amd64.zip + when: (packer.stat.exists == False) or (packer_version is version_compare('1.2.1', '<')) + +- name: Unzip the packer binary in /opt + command: chdir=/opt/ unzip packer_1.2.1_linux_amd64.zip + when: (packer.stat.exists == False) or (packer_version is version_compare('1.2.1', '<')) + +- name: Remove the downloaded packer zip file + file: + state: absent + path: /opt/packer_1.2.1_linux_amd64.zip + when: (packer.stat.exists == False) or (packer_version is version_compare('1.2.1', '<')) +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/packer/tasks/redhat_family.yml b/bench/playbooks/roles/packer/tasks/redhat_family.yml new file mode 100644 index 0000000..8cb1071 --- /dev/null +++ b/bench/playbooks/roles/packer/tasks/redhat_family.yml @@ -0,0 +1,8 @@ +--- + +- name: Install unzip + yum: + name: + - unzip + state: present +... diff --git a/bench/playbooks/roles/psutil/tasks/main.yml b/bench/playbooks/roles/psutil/tasks/main.yml new file mode 100644 index 0000000..9a38e34 --- /dev/null +++ b/bench/playbooks/roles/psutil/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- name: Install psutil + pip: name=psutil state=latest \ No newline at end of file diff --git a/bench/playbooks/roles/redis/tasks/main.yml b/bench/playbooks/roles/redis/tasks/main.yml new file mode 100644 index 0000000..4c009dc --- /dev/null +++ b/bench/playbooks/roles/redis/tasks/main.yml @@ -0,0 +1,25 @@ +--- + - name: Install yum packages + yum: + name: + - redis + state: present + when: ansible_os_family == 'RedHat' + + # Prerequisite for Debian and Ubuntu + - name: Install apt packages + apt: + pkg: + - redis-server + state: present + force: yes + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' + + # Prerequisite for MACOS + - name: install prerequisites for macos + homebrew: + name: + - redis + state: present + when: ansible_distribution == 'MacOSX' +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/supervisor/tasks/main.yml b/bench/playbooks/roles/supervisor/tasks/main.yml new file mode 100644 index 0000000..5926f27 --- /dev/null +++ b/bench/playbooks/roles/supervisor/tasks/main.yml @@ -0,0 +1,8 @@ +--- +- name: Install supervisor on centos + yum: name=supervisor state=present + when: ansible_os_family == 'RedHat' + +- name: Install supervisor on debian + apt: pkg=supervisor state=present force=yes + when: ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu' diff --git a/bench/playbooks/roles/swap/defaults/main.yml b/bench/playbooks/roles/swap/defaults/main.yml new file mode 100644 index 0000000..cf25d05 --- /dev/null +++ b/bench/playbooks/roles/swap/defaults/main.yml @@ -0,0 +1,3 @@ +--- +swap_size_mb: 1024 +... \ No newline at end of file diff --git a/bench/playbooks/roles/swap/tasks/main.yml b/bench/playbooks/roles/swap/tasks/main.yml new file mode 100644 index 0000000..1d61d2f --- /dev/null +++ b/bench/playbooks/roles/swap/tasks/main.yml @@ -0,0 +1,18 @@ +- name: Create swap space + command: dd if=/dev/zero of=/extraswap bs=1M count={{swap_size_mb}} + when: ansible_swaptotal_mb < 1 + +- name: Make swap + command: mkswap /extraswap + when: ansible_swaptotal_mb < 1 + +- name: Add to fstab + action: lineinfile dest=/etc/fstab regexp="extraswap" line="/extraswap none swap sw 0 0" state=present + when: ansible_swaptotal_mb < 1 + +- name: Turn swap on + command: swapon -a + when: ansible_swaptotal_mb < 1 + +- name: Set swappiness + shell: echo 1 | tee /proc/sys/vm/swappiness \ No newline at end of file diff --git a/bench/playbooks/roles/virtualbox/defaults/main.yml b/bench/playbooks/roles/virtualbox/defaults/main.yml new file mode 100644 index 0000000..c2c8340 --- /dev/null +++ b/bench/playbooks/roles/virtualbox/defaults/main.yml @@ -0,0 +1,3 @@ +--- +virtualbox_version: 5.2 +... 
\ No newline at end of file diff --git a/bench/playbooks/roles/virtualbox/files/virtualbox_centos.repo b/bench/playbooks/roles/virtualbox/files/virtualbox_centos.repo new file mode 100644 index 0000000..9ad836a --- /dev/null +++ b/bench/playbooks/roles/virtualbox/files/virtualbox_centos.repo @@ -0,0 +1,7 @@ +[virtualbox] +name=Oracle Linux / RHEL / CentOS-$releasever / $basearch - VirtualBox +baseurl=http://download.virtualbox.org/virtualbox/rpm/el/$releasever/$basearch +enabled=1 +gpgcheck=1 +repo_gpgcheck=1 +gpgkey=https://www.virtualbox.org/download/oracle_vbox.asc \ No newline at end of file diff --git a/bench/playbooks/roles/virtualbox/tasks/debian_family.yml b/bench/playbooks/roles/virtualbox/tasks/debian_family.yml new file mode 100644 index 0000000..9367c9b --- /dev/null +++ b/bench/playbooks/roles/virtualbox/tasks/debian_family.yml @@ -0,0 +1,32 @@ +--- +- name: Install dependencies + apt: + pkg: + - apt-transport-https + - ca-certificates + state: present + +- name: Add VirtualBox to sources.list + apt_repository: + repo: deb https://download.virtualbox.org/virtualbox/debian {{ ansible_distribution_release }} contrib + state: present + +- name: Add apt signing key for VirtualBox for Debian >= 8 and Ubuntu >= 16 + apt_key: + url: https://www.virtualbox.org/download/oracle_vbox_2016.asc + state: present + when: (ansible_distribution == "Debian" and ansible_distribution_major_version >= "8") or (ansible_distribution == "Ubuntu" and ansible_distribution_major_version >= "16") + +- name: Add apt signing key for VirtualBox for Debian < 8 and Ubuntu < 16 + apt_key: + url: https://www.virtualbox.org/download/oracle_vbox.asc + state: present + when: (ansible_distribution == "Debian" and ansible_distribution_major_version < "8") or (ansible_distribution == "Ubuntu" and ansible_distribution_major_version < "16") + +- name: Install VirtualBox + apt: + pkg: + - virtualbox-{{ virtualbox_version }} + update_cache: yes + state: present +... 
diff --git a/bench/playbooks/roles/virtualbox/tasks/main.yml b/bench/playbooks/roles/virtualbox/tasks/main.yml new file mode 100644 index 0000000..d62fbe7 --- /dev/null +++ b/bench/playbooks/roles/virtualbox/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- include_tasks: debian_family.yml + when: ansible_os_family == 'Debian' + +- include_tasks: redhat_family.yml + when: ansible_os_family == "RedHat" +... \ No newline at end of file diff --git a/bench/playbooks/roles/virtualbox/tasks/redhat_family.yml b/bench/playbooks/roles/virtualbox/tasks/redhat_family.yml new file mode 100644 index 0000000..899de17 --- /dev/null +++ b/bench/playbooks/roles/virtualbox/tasks/redhat_family.yml @@ -0,0 +1,18 @@ +--- +- name: Install the 'Development tools' package group + yum: + name: "@Development tools" + state: present + +- name: Install dependencies + yum: + name: + - kernel-devel + - deltarpm + state: present + +- copy: src=virtualbox_centos.repo dest=/etc/yum.repos.d/virtualbox.repo owner=root group=root mode=0644 force=no + +- name: Install VirtualBox + command: yum install -y VirtualBox-{{ virtualbox_version }} +... 
diff --git a/bench/playbooks/roles/wkhtmltopdf/tasks/main.yml b/bench/playbooks/roles/wkhtmltopdf/tasks/main.yml
new file mode 100644
index 0000000..2a6a89e
--- /dev/null
+++ b/bench/playbooks/roles/wkhtmltopdf/tasks/main.yml
@@ -0,0 +1,108 @@
+---
+- name: install base fonts
+  yum:
+    name:
+      - libXrender
+      - libXext
+      - xorg-x11-fonts-75dpi
+      - xorg-x11-fonts-Type1
+    state: present
+  when: ansible_os_family == 'RedHat'
+
+- name: install base fonts
+  apt:
+    pkg:
+      - libxrender1
+      - libxext6
+      - xfonts-75dpi
+      - xfonts-base
+    state: present
+    force: yes
+  when: ansible_os_family == 'Debian'
+
+- name: download wkhtmltox Ubuntu 20
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.focal_amd64.deb
+    dest: /tmp/wkhtmltox.deb
+  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '20' and ansible_architecture != 'aarch64'
+
+- name: download wkhtmltox Ubuntu 20 arm64
+  get_url:
+    # wkhtmltox supports arm64 starting from 0.12.6
+    url: https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox_0.12.6-1.focal_arm64.deb
+    dest: /tmp/wkhtmltox.deb
+  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '20' and ansible_architecture == 'aarch64'
+
+- name: download wkhtmltox Ubuntu 18
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.bionic_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+    checksum: "sha256:{{ 'db48fa1a043309c4bfe8c8e0e38dc06c183f821599dd88d4e3cea47c5a5d4cd3' if ansible_architecture == 'x86_64' else '1f5ac84c1cb25e385b49b94a04807d60bf73da217bc6c9fe2cbd1f0a61d33f63' }}"
+  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '18'
+
+- name: download wkhtmltox Ubuntu 16
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.xenial_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+    checksum: "sha256:{{ 'df203cee4dc9b3efb8d0cd6fc25fa819883224f50c75b76bd9c856903711dc14' if ansible_architecture == 'x86_64' else '27b6edafee099b87b2911cc68b780e79cffed3948bb5a074e8ea1cf8820da156' }}"
+  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '16'
+
+- name: download wkhtmltox Ubuntu 14
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.trusty_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+    checksum: "sha256:{{ '2a3d1fe80da0dbc69da56cf90a3d0ec2786d1b919be29527630d609fea4a6b7c' if ansible_architecture == 'x86_64' else '582e02881e4bc6be9aaa634da1fe8c02d3233fb57f6daab9efa137edb812dd3b' }}"
+  when: ansible_distribution == 'Ubuntu' and ansible_distribution_major_version == '14'
+
+- name: download wkhtmltox CentOS 6
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox-0.12.5-1.centos6.{{ "x86_64" if ansible_architecture == "x86_64" else "i686"}}.rpm
+    dest: /tmp/wkhtmltox.rpm
+    checksum: "sha256:{{ '17bff4966143d240a126b6cc414c6f79aa2106c0c97c772228e84d685221c25f' if ansible_architecture == 'x86_64' else 'c60e75fef5bfa1e79983919ffb47b40dcfbb49d121a510f11ca4b2a2603c00f1' }}"
+  when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '6'
+
+- name: download wkhtmltox CentOS 7
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox-0.12.5-1.centos7.{{ "x86_64" if ansible_architecture == "x86_64" else "i686"}}.rpm
+    dest: /tmp/wkhtmltox.rpm
+    checksum: "sha256:{{ 'ac4f909b836fa1fc0188d19a1ab844910f91612e9ccefcb5298aa955a058ffe4' if ansible_architecture == 'x86_64' else '1030279ac4b5b15dda04de2587b2a1942bde1c78aa1837dfec4ddcbea426721f' }}"
+  when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '7'
+
+- name: download wkhtmltox CentOS 8
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox-0.12.5-1.centos8.x86_64.rpm
+    dest: /tmp/wkhtmltox.rpm
+  when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
+
+- name: download wkhtmltox Debian 8
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.jessie_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+    checksum: "sha256:{{ '2583399a865d7604726da166ee7cec656b87ae0a6016e6bce7571dcd3045f98b' if ansible_architecture == 'x86_64' else '3a6969f3ed207a805092e05794644eb9e152aaa6518e9204c819fa318947a8a8' }}"
+  when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '8'
+
+- name: download wkhtmltox Debian 9
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.stretch_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+    checksum: "sha256:{{ '1140b0ab02aa6e17346af2f14ed0de807376de475ba90e1db3975f112fbd20bb' if ansible_architecture == 'x86_64' else '5b2d15e738ac479e7a8ca6fd765f406c3684a48091813520f87878278d6dd22a' }}"
+  when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '9'
+
+- name: download wkhtmltox Debian 10
+  get_url:
+    url: https://github.com/wkhtmltopdf/wkhtmltopdf/releases/download/0.12.5/wkhtmltox_0.12.5-1.buster_{{ "amd64" if ansible_architecture == "x86_64" else "i386"}}.deb
+    dest: /tmp/wkhtmltox.deb
+  when: ansible_distribution == 'Debian' and ansible_distribution_major_version == '10'
+
+- name: Install wkhtmltox rpm
+  yum:
+    name: /tmp/wkhtmltox.rpm
+    state: present
+  when: ansible_os_family == 'RedHat'
+
+- name: Install wkhtmltox deb
+  apt:
+    deb: /tmp/wkhtmltox.deb
+    state: present
+  when: ansible_os_family == 'Debian'
+...
diff --git a/bench/playbooks/site.yml b/bench/playbooks/site.yml
new file mode 100644
index 0000000..b049e07
--- /dev/null
+++ b/bench/playbooks/site.yml
@@ -0,0 +1,48 @@
+---
+# This is the master playbook that deploys the whole Xhiveframework and XhiveERP stack
+
+- hosts: localhost
+  become: yes
+  become_user: root
+  roles:
+    - { role: common, tags: common }
+    - { role: locale, tags: locale }
+    - { role: mariadb, tags: mariadb }
+    - { role: nodejs, tags: nodejs }
+    - { role: swap, tags: swap, when: production and not container }
+    - { role: logwatch, tags: logwatch, when: production }
+    - { role: bash_screen_wall, tags: bash_screen_wall, when: production }
+    - { role: xhiveframework_selinux, tags: xhiveframework_selinux, when: production }
+    - { role: dns_caching, tags: dns_caching, when: production }
+    - { role: ntpd, tags: ntpd, when: production }
+    - { role: wkhtmltopdf, tags: wkhtmltopdf }
+    - { role: psutil, tags: psutil }
+    - { role: redis, tags: redis }
+    - { role: supervisor, tags: supervisor, when: production }
+    - { role: nginx, tags: nginx, when: production }
+    - { role: fail2ban, tags: fail2ban, when: production }
+  tasks:
+    - name: Set hostname
+      hostname: name='{{ hostname }}'
+      when: hostname is defined and production
+
+    - name: Start NTPD
+      service: name=ntpd state=started
+      when: ansible_distribution == 'CentOS' and production
+
+    # NOTE: parentheses are required — `and` binds tighter than `or`, so the
+    # unparenthesized form started ntp on Debian even outside production.
+    - name: Start NTPD
+      service: name=ntp state=started
+      when: (ansible_distribution == 'Debian' or ansible_distribution == 'Ubuntu') and production
+
+    - include_tasks: macosx.yml
+      when: ansible_distribution == 'MacOSX'
+
+- name: setup bench and dev environment
+  hosts: localhost
+  vars:
+    bench_repo_path: "{{ user_directory }}/.bench"
+    bench_path: "{{ user_directory }}/{{ bench_name }}"
+  roles:
+    # setup xhiveframework-bench
+    - { role: bench, tags: "bench", when: not run_travis and not without_bench_setup }
+...
diff --git a/bench/playbooks/vm_build.yml b/bench/playbooks/vm_build.yml new file mode 100644 index 0000000..2be9639 --- /dev/null +++ b/bench/playbooks/vm_build.yml @@ -0,0 +1,9 @@ +--- +- name: Install Packer + hosts: localhost + become: yes + become_user: root + roles: + - { role: virtualbox, tags: "virtualbox" } + - { role: packer, tags: "packer" } +... diff --git a/bench/tests/__init__.py b/bench/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bench/tests/test_base.py b/bench/tests/test_base.py new file mode 100644 index 0000000..67ec72b --- /dev/null +++ b/bench/tests/test_base.py @@ -0,0 +1,134 @@ +# imports - standard imports +import getpass +import json +import os +import shutil +import subprocess +import sys +import traceback +import unittest + +# imports - module imports +from bench.utils import paths_in_bench, exec_cmd +from bench.utils.system import init +from bench.bench import Bench + +PYTHON_VER = sys.version_info + +XHIVEFRAMEWORK_BRANCH = "version-13-hotfix" +if PYTHON_VER.major == 3: + if PYTHON_VER.minor >= 10: + XHIVEFRAMEWORK_BRANCH = "develop" + + +class TestBenchBase(unittest.TestCase): + def setUp(self): + self.benches_path = "." 
+ self.benches = [] + + def tearDown(self): + for bench_name in self.benches: + bench_path = os.path.join(self.benches_path, bench_name) + bench = Bench(bench_path) + mariadb_password = ( + "travis" + if os.environ.get("CI") + else getpass.getpass(prompt="Enter MariaDB root Password: ") + ) + + if bench.exists: + for site in bench.sites: + subprocess.call( + [ + "bench", + "drop-site", + site, + "--force", + "--no-backup", + "--root-password", + mariadb_password, + ], + cwd=bench_path, + ) + shutil.rmtree(bench_path, ignore_errors=True) + + def assert_folders(self, bench_name): + for folder in paths_in_bench: + self.assert_exists(bench_name, folder) + self.assert_exists(bench_name, "apps", "xhiveframework") + + def assert_virtual_env(self, bench_name): + bench_path = os.path.abspath(bench_name) + python_path = os.path.abspath(os.path.join(bench_path, "env", "bin", "python")) + self.assertTrue(python_path.startswith(bench_path)) + for subdir in ("bin", "lib", "share"): + self.assert_exists(bench_name, "env", subdir) + + def assert_config(self, bench_name): + for config, search_key in ( + ("redis_queue.conf", "redis_queue.rdb"), + ("redis_cache.conf", "redis_cache.rdb"), + ): + + self.assert_exists(bench_name, "config", config) + + with open(os.path.join(bench_name, "config", config)) as f: + self.assertTrue(search_key in f.read()) + + def assert_common_site_config(self, bench_name, expected_config): + common_site_config_path = os.path.join( + self.benches_path, bench_name, "sites", "common_site_config.json" + ) + self.assertTrue(os.path.exists(common_site_config_path)) + + with open(common_site_config_path) as f: + config = json.load(f) + + for key, value in list(expected_config.items()): + self.assertEqual(config.get(key), value) + + def assert_exists(self, *args): + self.assertTrue(os.path.exists(os.path.join(*args))) + + def new_site(self, site_name, bench_name): + new_site_cmd = ["bench", "new-site", site_name, "--admin-password", "admin"] + + if 
os.environ.get("CI"): + new_site_cmd.extend(["--mariadb-root-password", "travis"]) + + subprocess.call(new_site_cmd, cwd=os.path.join(self.benches_path, bench_name)) + + def init_bench(self, bench_name, **kwargs): + self.benches.append(bench_name) + xhiveframework_tmp_path = "/tmp/xhiveframework" + + if not os.path.exists(xhiveframework_tmp_path): + exec_cmd( + f"git clone https://lab.membtech.com/xhiveframework/xhiveframework15 -b {XHIVEFRAMEWORK_BRANCH} --depth 1 --origin upstream {xhiveframework_tmp_path}" + ) + + kwargs.update( + dict( + python=sys.executable, + no_procfile=True, + no_backups=True, + xhiveframework_path=xhiveframework_tmp_path, + ) + ) + + if not os.path.exists(os.path.join(self.benches_path, bench_name)): + init(bench_name, **kwargs) + exec_cmd( + "git remote set-url upstream https://lab.membtech.com/xhiveframework/xhiveframework15", + cwd=os.path.join(self.benches_path, bench_name, "apps", "xhiveframework"), + ) + + def file_exists(self, path): + if os.environ.get("CI"): + return not subprocess.call(["sudo", "test", "-f", path]) + return os.path.isfile(path) + + def get_traceback(self): + exc_type, exc_value, exc_tb = sys.exc_info() + trace_list = traceback.format_exception(exc_type, exc_value, exc_tb) + return "".join(str(t) for t in trace_list) diff --git a/bench/tests/test_init.py b/bench/tests/test_init.py new file mode 100755 index 0000000..5d343e5 --- /dev/null +++ b/bench/tests/test_init.py @@ -0,0 +1,205 @@ +# imports - standard imports +import json +import os +import subprocess +import unittest + +# imports - third paty imports +import git + +# imports - module imports +from bench.utils import exec_cmd +from bench.app import App +from bench.tests.test_base import XHIVEFRAMEWORK_BRANCH, TestBenchBase +from bench.bench import Bench + + +# changed from xhiveframework_theme because it wasn't maintained and incompatible, +# chat app & wiki was breaking too. 
hopefully xhiveframework_docs will be maintained +# for longer since docs.xhiveerp.com is powered by it ;) +TEST_XHIVEFRAMEWORK_APP = "xhiveframework_docs" + + +class TestBenchInit(TestBenchBase): + def test_utils(self): + self.assertEqual(subprocess.call("bench"), 0) + + def test_init(self, bench_name="test-bench", **kwargs): + self.init_bench(bench_name, **kwargs) + app = App("file:///tmp/xhiveframework") + self.assertTupleEqual( + (app.mount_path, app.url, app.repo, app.app_name, app.org), + ("/tmp/xhiveframework", "file:///tmp/xhiveframework", "xhiveframework", "xhiveframework", "xhiveframework"), + ) + self.assert_folders(bench_name) + self.assert_virtual_env(bench_name) + self.assert_config(bench_name) + test_bench = Bench(bench_name) + app = App("xhiveframework", bench=test_bench) + self.assertEqual(app.from_apps, True) + + def basic(self): + try: + self.test_init() + except Exception: + print(self.get_traceback()) + + def test_multiple_benches(self): + for bench_name in ("test-bench-1", "test-bench-2"): + self.init_bench(bench_name, skip_assets=True) + + self.assert_common_site_config( + "test-bench-1", + { + "webserver_port": 8000, + "socketio_port": 9000, + "file_watcher_port": 6787, + "redis_queue": "redis://127.0.0.1:11000", + "redis_socketio": "redis://127.0.0.1:13000", + "redis_cache": "redis://127.0.0.1:13000", + }, + ) + + self.assert_common_site_config( + "test-bench-2", + { + "webserver_port": 8001, + "socketio_port": 9001, + "file_watcher_port": 6788, + "redis_queue": "redis://127.0.0.1:11001", + "redis_socketio": "redis://127.0.0.1:13001", + "redis_cache": "redis://127.0.0.1:13001", + }, + ) + + def test_new_site(self): + bench_name = "test-bench" + site_name = "test-site.local" + bench_path = os.path.join(self.benches_path, bench_name) + site_path = os.path.join(bench_path, "sites", site_name) + site_config_path = os.path.join(site_path, "site_config.json") + + self.init_bench(bench_name) + self.new_site(site_name, bench_name) + + 
self.assertTrue(os.path.exists(site_path)) + self.assertTrue(os.path.exists(os.path.join(site_path, "private", "backups"))) + self.assertTrue(os.path.exists(os.path.join(site_path, "private", "files"))) + self.assertTrue(os.path.exists(os.path.join(site_path, "public", "files"))) + self.assertTrue(os.path.exists(site_config_path)) + + with open(site_config_path) as f: + site_config = json.loads(f.read()) + + for key in ("db_name", "db_password"): + self.assertTrue(key in site_config) + self.assertTrue(site_config[key]) + + def test_get_app(self): + self.init_bench("test-bench", skip_assets=True) + bench_path = os.path.join(self.benches_path, "test-bench") + exec_cmd(f"bench get-app {TEST_XHIVEFRAMEWORK_APP} --skip-assets", cwd=bench_path) + self.assertTrue(os.path.exists(os.path.join(bench_path, "apps", TEST_XHIVEFRAMEWORK_APP))) + app_installed_in_env = TEST_XHIVEFRAMEWORK_APP in subprocess.check_output( + ["bench", "pip", "freeze"], cwd=bench_path + ).decode("utf8") + self.assertTrue(app_installed_in_env) + + @unittest.skipIf(XHIVEFRAMEWORK_BRANCH != "develop", "only for develop branch") + def test_get_app_resolve_deps(self): + XHIVEFRAMEWORK_APP = "healthcare" + self.init_bench("test-bench", skip_assets=True) + bench_path = os.path.join(self.benches_path, "test-bench") + exec_cmd(f"bench get-app {XHIVEFRAMEWORK_APP} --resolve-deps --skip-assets", cwd=bench_path) + self.assertTrue(os.path.exists(os.path.join(bench_path, "apps", XHIVEFRAMEWORK_APP))) + + states_path = os.path.join(bench_path, "sites", "apps.json") + self.assertTrue(os.path.exists(states_path)) + + with open(states_path) as f: + states = json.load(f) + + self.assertTrue(XHIVEFRAMEWORK_APP in states) + + def test_install_app(self): + bench_name = "test-bench" + site_name = "install-app.test" + bench_path = os.path.join(self.benches_path, "test-bench") + + self.init_bench(bench_name, skip_assets=True) + exec_cmd( + f"bench get-app {TEST_XHIVEFRAMEWORK_APP} --branch master --skip-assets", 
cwd=bench_path + ) + + self.assertTrue(os.path.exists(os.path.join(bench_path, "apps", TEST_XHIVEFRAMEWORK_APP))) + + # check if app is installed + app_installed_in_env = TEST_XHIVEFRAMEWORK_APP in subprocess.check_output( + ["bench", "pip", "freeze"], cwd=bench_path + ).decode("utf8") + self.assertTrue(app_installed_in_env) + + # create and install app on site + self.new_site(site_name, bench_name) + installed_app = not exec_cmd( + f"bench --site {site_name} install-app {TEST_XHIVEFRAMEWORK_APP}", + cwd=bench_path, + _raise=False, + ) + + if installed_app: + app_installed_on_site = subprocess.check_output( + ["bench", "--site", site_name, "list-apps"], cwd=bench_path + ).decode("utf8") + self.assertTrue(TEST_XHIVEFRAMEWORK_APP in app_installed_on_site) + + def test_remove_app(self): + self.init_bench("test-bench", skip_assets=True) + bench_path = os.path.join(self.benches_path, "test-bench") + + exec_cmd( + f"bench get-app {TEST_XHIVEFRAMEWORK_APP} --branch master --overwrite --skip-assets", + cwd=bench_path, + ) + exec_cmd(f"bench remove-app {TEST_XHIVEFRAMEWORK_APP}", cwd=bench_path) + + with open(os.path.join(bench_path, "sites", "apps.txt")) as f: + self.assertFalse(TEST_XHIVEFRAMEWORK_APP in f.read()) + self.assertFalse( + TEST_XHIVEFRAMEWORK_APP + in subprocess.check_output(["bench", "pip", "freeze"], cwd=bench_path).decode("utf8") + ) + self.assertFalse(os.path.exists(os.path.join(bench_path, "apps", TEST_XHIVEFRAMEWORK_APP))) + + def test_switch_to_branch(self): + self.init_bench("test-bench", skip_assets=True) + bench_path = os.path.join(self.benches_path, "test-bench") + app_path = os.path.join(bench_path, "apps", "xhiveframework") + + # * chore: change to 14 when avalible + prevoius_branch = "version-13" + if XHIVEFRAMEWORK_BRANCH != "develop": + # assuming we follow `version-#` + prevoius_branch = f"version-{int(XHIVEFRAMEWORK_BRANCH.split('-')[1]) - 1}" + + successful_switch = not exec_cmd( + f"bench switch-to-branch {prevoius_branch} xhiveframework 
--upgrade", + cwd=bench_path, + _raise=False, + ) + if successful_switch: + app_branch_after_switch = str(git.Repo(path=app_path).active_branch) + self.assertEqual(prevoius_branch, app_branch_after_switch) + + successful_switch = not exec_cmd( + f"bench switch-to-branch {XHIVEFRAMEWORK_BRANCH} xhiveframework --upgrade", + cwd=bench_path, + _raise=False, + ) + if successful_switch: + app_branch_after_second_switch = str(git.Repo(path=app_path).active_branch) + self.assertEqual(XHIVEFRAMEWORK_BRANCH, app_branch_after_second_switch) + + +if __name__ == "__main__": + unittest.main() diff --git a/bench/tests/test_setup_production.py b/bench/tests/test_setup_production.py new file mode 100644 index 0000000..c46e193 --- /dev/null +++ b/bench/tests/test_setup_production.py @@ -0,0 +1,182 @@ +# imports - standard imports +import getpass +import os +import pathlib +import re +import subprocess +import time +import unittest + +# imports - module imports +from bench.utils import exec_cmd, get_cmd_output, which +from bench.config.production_setup import get_supervisor_confdir +from bench.tests.test_base import TestBenchBase + + +class TestSetupProduction(TestBenchBase): + def test_setup_production(self): + user = getpass.getuser() + + for bench_name in ("test-bench-1", "test-bench-2"): + bench_path = os.path.join(os.path.abspath(self.benches_path), bench_name) + self.init_bench(bench_name) + exec_cmd(f"sudo bench setup production {user} --yes", cwd=bench_path) + self.assert_nginx_config(bench_name) + self.assert_supervisor_config(bench_name) + self.assert_supervisor_process(bench_name) + + self.assert_nginx_process() + exec_cmd(f"sudo bench setup sudoers {user}") + self.assert_sudoers(user) + + for bench_name in self.benches: + bench_path = os.path.join(os.path.abspath(self.benches_path), bench_name) + exec_cmd("sudo bench disable-production", cwd=bench_path) + + def production(self): + try: + self.test_setup_production() + except Exception: + print(self.get_traceback()) + + 
def assert_nginx_config(self, bench_name): + conf_src = os.path.join( + os.path.abspath(self.benches_path), bench_name, "config", "nginx.conf" + ) + conf_dest = f"/etc/nginx/conf.d/{bench_name}.conf" + + self.assertTrue(self.file_exists(conf_src)) + self.assertTrue(self.file_exists(conf_dest)) + + # symlink matches + self.assertEqual(os.path.realpath(conf_dest), conf_src) + + # file content + with open(conf_src) as f: + f = f.read() + + for key in ( + f"upstream {bench_name}-xhiveframework", + f"upstream {bench_name}-socketio-server", + ): + self.assertTrue(key in f) + + def assert_nginx_process(self): + out = get_cmd_output("sudo nginx -t 2>&1") + self.assertTrue( + "nginx: configuration file /etc/nginx/nginx.conf test is successful" in out + ) + + def assert_sudoers(self, user): + sudoers_file = "/etc/sudoers.d/xhiveframework" + service = which("service") + nginx = which("nginx") + + self.assertTrue(self.file_exists(sudoers_file)) + + if os.environ.get("CI"): + sudoers = subprocess.check_output(["sudo", "cat", sudoers_file]).decode("utf-8") + else: + sudoers = pathlib.Path(sudoers_file).read_text() + self.assertTrue(f"{user} ALL = (root) NOPASSWD: {service} nginx *" in sudoers) + self.assertTrue(f"{user} ALL = (root) NOPASSWD: {nginx}" in sudoers) + + def assert_supervisor_config(self, bench_name, use_rq=True): + conf_src = os.path.join( + os.path.abspath(self.benches_path), bench_name, "config", "supervisor.conf" + ) + + supervisor_conf_dir = get_supervisor_confdir() + conf_dest = f"{supervisor_conf_dir}/{bench_name}.conf" + + self.assertTrue(self.file_exists(conf_src)) + self.assertTrue(self.file_exists(conf_dest)) + + # symlink matches + self.assertEqual(os.path.realpath(conf_dest), conf_src) + + # file content + with open(conf_src) as f: + f = f.read() + + tests = [ + f"program:{bench_name}-xhiveframework-web", + f"program:{bench_name}-redis-cache", + f"program:{bench_name}-redis-queue", + f"group:{bench_name}-web", + f"group:{bench_name}-workers", + 
f"group:{bench_name}-redis", + ] + + if not os.environ.get("CI"): + tests.append(f"program:{bench_name}-node-socketio") + + if use_rq: + tests.extend( + [ + f"program:{bench_name}-xhiveframework-schedule", + f"program:{bench_name}-xhiveframework-default-worker", + f"program:{bench_name}-xhiveframework-short-worker", + f"program:{bench_name}-xhiveframework-long-worker", + ] + ) + + else: + tests.extend( + [ + f"program:{bench_name}-xhiveframework-workerbeat", + f"program:{bench_name}-xhiveframework-worker", + f"program:{bench_name}-xhiveframework-longjob-worker", + f"program:{bench_name}-xhiveframework-async-worker", + ] + ) + + for key in tests: + self.assertTrue(key in f) + + def assert_supervisor_process(self, bench_name, use_rq=True, disable_production=False): + out = get_cmd_output("supervisorctl status") + + while "STARTING" in out: + print("Waiting for all processes to start...") + time.sleep(10) + out = get_cmd_output("supervisorctl status") + + tests = [ + r"{bench_name}-web:{bench_name}-xhiveframework-web[\s]+RUNNING", + # Have commented for the time being. Needs to be uncommented later on. Bench is failing on travis because of this. 
+ # It works on one bench and fails on another.giving FATAL or BACKOFF (Exited too quickly (process log may have details)) + # "{bench_name}-web:{bench_name}-node-socketio[\s]+RUNNING", + r"{bench_name}-redis:{bench_name}-redis-cache[\s]+RUNNING", + r"{bench_name}-redis:{bench_name}-redis-queue[\s]+RUNNING", + ] + + if use_rq: + tests.extend( + [ + r"{bench_name}-workers:{bench_name}-xhiveframework-schedule[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-default-worker-0[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-short-worker-0[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-long-worker-0[\s]+RUNNING", + ] + ) + + else: + tests.extend( + [ + r"{bench_name}-workers:{bench_name}-xhiveframework-workerbeat[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-worker[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-longjob-worker[\s]+RUNNING", + r"{bench_name}-workers:{bench_name}-xhiveframework-async-worker[\s]+RUNNING", + ] + ) + + for key in tests: + if disable_production: + self.assertFalse(re.search(key, out)) + else: + self.assertTrue(re.search(key, out)) + + +if __name__ == "__main__": + unittest.main() diff --git a/bench/tests/test_utils.py b/bench/tests/test_utils.py new file mode 100644 index 0000000..2258d9b --- /dev/null +++ b/bench/tests/test_utils.py @@ -0,0 +1,106 @@ +import os +import shutil +import subprocess +import unittest + +from bench.app import App +from bench.bench import Bench +from bench.exceptions import InvalidRemoteException +from bench.utils import is_valid_xhiveframework_branch + + +class TestUtils(unittest.TestCase): + def test_app_utils(self): + git_url = "https://lab.membtech.com/xhiveframework/xhiveframework15" + branch = "develop" + app = App(name=git_url, branch=branch, bench=Bench(".")) + self.assertTrue( + all( + [ + app.name == git_url, + app.branch == branch, + app.tag == branch, + app.is_url is True, + app.on_disk is False, + app.org == 
"xhiveframework", + app.url == git_url, + ] + ) + ) + + def test_is_valid_xhiveframework_branch(self): + with self.assertRaises(InvalidRemoteException): + is_valid_xhiveframework_branch( + "https://lab.membtech.com/xhiveframework/xhiveframework15.git", xhiveframework_branch="random-branch" + ) + is_valid_xhiveframework_branch( + "https://github.com/random/random.git", xhiveframework_branch="random-branch" + ) + + is_valid_xhiveframework_branch( + "https://lab.membtech.com/xhiveframework/xhiveframework15.git", xhiveframework_branch="develop" + ) + is_valid_xhiveframework_branch( + "https://lab.membtech.com/xhiveframework/xhiveframework15.git", xhiveframework_branch="v13.29.0" + ) + + def test_app_states(self): + bench_dir = "./sandbox" + sites_dir = os.path.join(bench_dir, "sites") + + if not os.path.exists(sites_dir): + os.makedirs(sites_dir) + + fake_bench = Bench(bench_dir) + + self.assertTrue(hasattr(fake_bench.apps, "states")) + + fake_bench.apps.states = { + "xhiveframework": { + "resolution": {"branch": "develop", "commit_hash": "234rwefd"}, + "version": "14.0.0-dev", + } + } + fake_bench.apps.update_apps_states() + + self.assertEqual(fake_bench.apps.states, {}) + + xhiveframework_path = os.path.join(bench_dir, "apps", "xhiveframework") + + os.makedirs(os.path.join(xhiveframework_path, "xhiveframework")) + + subprocess.run(["git", "init"], cwd=xhiveframework_path, capture_output=True, check=True) + + with open(os.path.join(xhiveframework_path, "xhiveframework", "__init__.py"), "w+") as f: + f.write("__version__ = '11.0'") + + subprocess.run(["git", "add", "."], cwd=xhiveframework_path, capture_output=True, check=True) + subprocess.run( + ["git", "config", "user.email", "bench-test_app_states@gha.com"], + cwd=xhiveframework_path, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "config", "user.name", "App States Test"], + cwd=xhiveframework_path, + capture_output=True, + check=True, + ) + subprocess.run( + ["git", "commit", "-m", "temp"], 
cwd=xhiveframework_path, capture_output=True, check=True + ) + + fake_bench.apps.update_apps_states(app_name="xhiveframework") + + self.assertIn("xhiveframework", fake_bench.apps.states) + self.assertIn("version", fake_bench.apps.states["xhiveframework"]) + self.assertEqual("11.0", fake_bench.apps.states["xhiveframework"]["version"]) + + shutil.rmtree(bench_dir) + + def test_ssh_ports(self): + app = App("git@github.com:22:xhiveframework/xhiveframework15") + self.assertEqual( + (app.use_ssh, app.org, app.repo, app.app_name), (True, "xhiveframework", "xhiveframework", "xhiveframework") + ) diff --git a/bench/utils/__init__.py b/bench/utils/__init__.py new file mode 100644 index 0000000..33b699e --- /dev/null +++ b/bench/utils/__init__.py @@ -0,0 +1,607 @@ +# imports - standard imports +import json +import logging +import os +import re +import subprocess +import sys +from functools import lru_cache +from glob import glob +from pathlib import Path +from shlex import split +from tarfile import TarInfo +from typing import List, Optional, Tuple + +# imports - third party imports +import click + +# imports - module imports +from bench import PROJECT_NAME, VERSION +from bench.exceptions import ( + AppNotInstalledError, + CommandFailedError, + InvalidRemoteException, +) + +logger = logging.getLogger(PROJECT_NAME) +paths_in_app = ("hooks.py", "modules.txt", "patches.txt") +paths_in_bench = ("apps", "sites", "config", "logs", "config/pids") +sudoers_file = "/etc/sudoers.d/xhiveframework" +UNSET_ARG = object() + + +def is_bench_directory(directory=os.path.curdir): + is_bench = True + + for folder in paths_in_bench: + path = os.path.abspath(os.path.join(directory, folder)) + is_bench = is_bench and os.path.exists(path) + + return is_bench + + +def is_xhiveframework_app(directory: str) -> bool: + is_xhiveframework_app = True + + for folder in paths_in_app: + if not is_xhiveframework_app: + break + + path = glob(os.path.join(directory, "**", folder)) + is_xhiveframework_app = 
is_xhiveframework_app and path + + return bool(is_xhiveframework_app) + +def get_bench_cache_path(sub_dir: Optional[str]) -> Path: + relative_path = "~/.cache/bench" + if sub_dir and not sub_dir.startswith("/"): + relative_path += f"/{sub_dir}" + + cache_path = os.path.expanduser(relative_path) + cache_path = Path(cache_path) + cache_path.mkdir(parents=True, exist_ok=True) + return cache_path + +@lru_cache(maxsize=None) +def is_valid_xhiveframework_branch(xhiveframework_path: str, xhiveframework_branch: str): + """Check if a branch exists in a repo. Throws InvalidRemoteException if branch is not found + + Uses native git command to check for branches on a remote. + + :param xhiveframework_path: git url + :type xhiveframework_path: str + :param xhiveframework_branch: branch to check + :type xhiveframework_branch: str + :raises InvalidRemoteException: branch for this repo doesn't exist + """ + from git.cmd import Git + from git.exc import GitCommandError + + g = Git() + + if xhiveframework_branch: + try: + res = g.ls_remote("--heads", "--tags", xhiveframework_path, xhiveframework_branch) + if not res: + raise InvalidRemoteException( + f"Invalid branch or tag: {xhiveframework_branch} for the remote {xhiveframework_path}" + ) + except GitCommandError as e: + raise InvalidRemoteException(f"Invalid xhiveframework path: {xhiveframework_path}") from e + + +def log(message, level=0, no_log=False, stderr=False): + import bench + import bench.cli + + levels = { + 0: ("blue", "INFO"), # normal + 1: ("green", "SUCCESS"), # success + 2: ("red", "ERROR"), # fail + 3: ("yellow", "WARN"), # warn/suggest + } + + color, prefix = levels.get(level, levels[0]) + + if bench.cli.from_command_line and bench.cli.dynamic_feed: + bench.LOG_BUFFER.append({"prefix": prefix, "message": message, "color": color}) + + if no_log: + click.secho(message, fg=color, err=stderr) + else: + loggers = {2: logger.error, 3: logger.warning} + level_logger = loggers.get(level, logger.info) + + 
level_logger(message) + click.secho(f"{prefix}: {message}", fg=color, err=stderr) + + +def check_latest_version(): + if VERSION.endswith("dev"): + return + + import requests + from semantic_version import Version + + try: + pypi_request = requests.get("https://pypi.org/pypi/xhiveframework-bench/json") + except Exception: + # Exceptions thrown are defined in requests.exceptions + # ignore checking on all Exceptions + return + + if pypi_request.status_code == 200: + pypi_version_str = pypi_request.json().get("info").get("version") + pypi_version = Version(pypi_version_str) + local_version = Version(VERSION) + + if pypi_version > local_version: + log( + f"A newer version of bench is available: {local_version} → {pypi_version}", + stderr=True, + ) + + +def pause_exec(seconds=10): + from time import sleep + + for i in range(seconds, 0, -1): + print(f"Will continue execution in {i} seconds...", end="\r") + sleep(1) + + print(" " * 40, end="\r") + + +def exec_cmd(cmd, cwd=".", env=None, _raise=True): + if env: + env.update(os.environ.copy()) + + click.secho(f"$ {cmd}", fg="bright_black") + + cwd_info = f"cd {cwd} && " if cwd != "." 
else "" + cmd_log = f"{cwd_info}{cmd}" + logger.debug(cmd_log) + spl_cmd = split(cmd) + return_code = subprocess.call(spl_cmd, cwd=cwd, universal_newlines=True, env=env) + if return_code: + logger.warning(f"{cmd_log} executed with exit code {return_code}") + if _raise: + raise CommandFailedError(cmd) from subprocess.CalledProcessError(return_code, cmd) + return return_code + + +def which(executable: str, raise_err: bool = False) -> str: + from shutil import which + + exec_ = which(executable) + + if not exec_ and raise_err: + raise FileNotFoundError(f"{executable} not found in PATH") + + return exec_ + + +def setup_logging(bench_path=".") -> logging.Logger: + LOG_LEVEL = 15 + logging.addLevelName(LOG_LEVEL, "LOG") + + def logv(self, message, *args, **kws): + if self.isEnabledFor(LOG_LEVEL): + self._log(LOG_LEVEL, message, args, **kws) + + logging.Logger.log = logv + + if os.path.exists(os.path.join(bench_path, "logs")): + log_file = os.path.join(bench_path, "logs", "bench.log") + hdlr = logging.FileHandler(log_file) + else: + hdlr = logging.NullHandler() + + logger = logging.getLogger(PROJECT_NAME) + formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s") + hdlr.setFormatter(formatter) + logger.addHandler(hdlr) + logger.setLevel(logging.DEBUG) + + return logger + + +def get_process_manager() -> str: + for proc_man in ["honcho", "foreman", "forego"]: + proc_man_path = which(proc_man) + if proc_man_path: + return proc_man_path + + +def get_git_version() -> float: + """returns git version from `git --version` + extracts version number from string `get version 1.9.1` etc""" + version = get_cmd_output("git --version") + version = version.strip().split()[2] + version = ".".join(version.split(".")[0:2]) + return float(version) + + +def get_cmd_output(cmd, cwd=".", _raise=True): + output = "" + try: + output = subprocess.check_output( + cmd, cwd=cwd, shell=True, stderr=subprocess.PIPE, encoding="utf-8" + ).strip() + except subprocess.CalledProcessError as e: 
		# CalledProcessError may still carry useful output; prefer returning
		# whatever the command printed over raising.
		if e.output:
			output = e.output
		elif _raise:
			raise
	return output


def is_root() -> bool:
	"""Return True if the current process is running as root (UID 0)."""
	return os.getuid() == 0


def run_xhiveframework_cmd(*args, **kwargs):
	"""Run a xhiveframework command via ``xhiveframework.utils.bench_helper`` using the
	bench's env python, from the bench's ``sites`` directory.

	args: tokens passed through to the xhiveframework CLI.
	kwargs: only ``bench_path`` is read (defaults to the current directory).

	Exits this process with the command's return code when it is non-zero.
	"""
	from bench.cli import from_command_line
	from bench.utils.bench import get_env_cmd

	bench_path = kwargs.get("bench_path", ".")
	f = get_env_cmd("python", bench_path=bench_path)
	sites_dir = os.path.join(bench_path, "sites")

	# When not invoked from the command line, capture output so it can be
	# streamed through print_output instead of inheriting the parent's stdio.
	is_async = not from_command_line
	if is_async:
		stderr = stdout = subprocess.PIPE
	else:
		stderr = stdout = None

	p = subprocess.Popen(
		(f, "-m", "xhiveframework.utils.bench_helper", "xhiveframework") + args,
		cwd=sites_dir,
		stdout=stdout,
		stderr=stderr,
	)

	return_code = print_output(p) if is_async else p.wait()
	if return_code > 0:
		sys.exit(return_code)


def print_output(p):
	"""Stream subprocess *p*'s stdout/stderr to this process's stdio until it
	exits, then return its return code.

	NOTE(review): run_xhiveframework_cmd opens the pipes without text mode, so
	``p.stdout.read(1)`` presumably yields bytes while the comparisons below
	are against str literals — confirm the intended stream mode.
	"""
	from select import select

	while p.poll() is None:
		# Block until at least one of the two streams has data ready.
		readx = select([p.stdout.fileno(), p.stderr.fileno()], [], [])[0]
		send_buffer = []
		for fd in readx:
			if fd == p.stdout.fileno():
				# Read stdout one character at a time, flushing the buffered
				# line whenever a carriage return or newline is seen.
				while 1:
					buf = p.stdout.read(1)
					if not len(buf):
						break
					if buf == "\r" or buf == "\n":
						send_buffer.append(buf)
						log_line("".join(send_buffer), "stdout")
						send_buffer = []
					else:
						send_buffer.append(buf)

			if fd == p.stderr.fileno():
				log_line(p.stderr.readline(), "stderr")
	return p.poll()


def log_line(data, stream):
	"""Write *data* to stderr when *stream* == "stderr", otherwise to stdout."""
	if stream == "stderr":
		return sys.stderr.write(data)
	return sys.stdout.write(data)


def get_bench_name(bench_path):
	"""Return the bench's name: the basename of its absolute path."""
	return os.path.basename(os.path.abspath(bench_path))


def set_git_remote_url(git_url, bench_path="."):
	"""Set the ``upstream`` remote URL of an installed app's repository.

	The app name is derived from the last path segment of *git_url* (minus a
	trailing ``.git``). Raises AppNotInstalledError if the app is not part of
	this bench.
	"""
	from bench.app import get_repo_dir
	from bench.bench import Bench

	app = git_url.rsplit("/", 1)[1].rsplit(".", 1)[0]

	if app not in Bench(bench_path).apps:
		raise AppNotInstalledError(f"No app named {app}")

	app_dir = get_repo_dir(app, bench_path=bench_path)

	# Only touch the remote if the app directory is actually a git checkout.
	if os.path.exists(os.path.join(app_dir, ".git")):
		exec_cmd(f"git remote set-url upstream {git_url}", cwd=app_dir)
+ + +def run_playbook(playbook_name, extra_vars=None, tag=None): + import bench + + if not which("ansible"): + print( + "Ansible is needed to run this command, please install it using 'pip" + " install ansible'" + ) + sys.exit(1) + args = ["ansible-playbook", "-c", "local", playbook_name, "-vvvv"] + + if extra_vars: + args.extend(["-e", json.dumps(extra_vars)]) + + if tag: + args.extend(["-t", tag]) + + subprocess.check_call(args, cwd=os.path.join(bench.__path__[0], "playbooks")) + + +def find_benches(directory: str = None) -> List: + if not directory: + directory = os.path.expanduser("~") + elif os.path.exists(directory): + directory = os.path.abspath(directory) + else: + log("Directory doesn't exist", level=2) + sys.exit(1) + + if is_bench_directory(directory): + if os.path.curdir == directory: + print("You are in a bench directory!") + else: + print(f"{directory} is a bench directory!") + return + + benches = [] + + try: + sub_directories = os.listdir(directory) + except PermissionError: + return benches + + for sub in sub_directories: + sub = os.path.join(directory, sub) + if os.path.isdir(sub) and not os.path.islink(sub): + if is_bench_directory(sub): + print(f"{sub} found!") + benches.append(sub) + else: + benches.extend(find_benches(sub)) + + return benches + + +def is_dist_editable(dist: str) -> bool: + """Is distribution an editable install?""" + for path_item in sys.path: + egg_link = os.path.join(path_item, f"{dist}.egg-link") + if os.path.isfile(egg_link): + return True + return False + + +def find_parent_bench(path: str) -> str: + """Checks if parent directories are benches""" + if is_bench_directory(directory=path): + return path + + home_path = os.path.expanduser("~") + root_path = os.path.abspath(os.sep) + + if path not in {home_path, root_path}: + # NOTE: the os.path.split assumes that given path is absolute + parent_dir = os.path.split(path)[0] + return find_parent_bench(parent_dir) + + +def get_env_xhiveframework_commands(bench_path=".") -> List: 
+ """Caches all available commands (even custom apps) via Xhiveframework + Default caching behaviour: generated the first time any command (for a specific bench directory) + """ + from bench.utils.bench import get_env_cmd + + python = get_env_cmd("python", bench_path=bench_path) + sites_path = os.path.join(bench_path, "sites") + + try: + return json.loads( + get_cmd_output( + f"{python} -m xhiveframework.utils.bench_helper get-xhiveframework-commands", cwd=sites_path + ) + ) + + except subprocess.CalledProcessError as e: + if hasattr(e, "stderr"): + print(e.stderr) + + return [] + + +def find_org(org_repo, using_cached: bool=False): + import requests + + org_repo = org_repo[0] + + for org in ["xhiveframework", "xhiveerp"]: + res = requests.head(f"https://api.github.com/repos/{org}/{org_repo}") + if res.status_code in (400, 403): + res = requests.head(f"https://lab.membtech.com/{org}/{org_repo}") + if res.ok: + return org, org_repo + + if using_cached: + return "", org_repo + + raise InvalidRemoteException(f"{org_repo} not found under xhiveframework or xhiveerp GitHub accounts") + + +def fetch_details_from_tag(_tag: str, using_cached: bool=False) -> Tuple[str, str, str]: + if not _tag: + raise Exception("Tag is not provided") + + app_tag = _tag.split("@") + org_repo = app_tag[0].split("/") + + try: + repo, tag = app_tag + except ValueError: + repo, tag = app_tag + [None] + + try: + org, repo = org_repo + except Exception: + org, repo = find_org(org_repo, using_cached) + + return org, repo, tag + + +def is_git_url(url: str) -> bool: + # modified to allow without the tailing .git from https://github.com/jonschlinkert/is-git-url.git + pattern = r"(?:git|ssh|https?|\w*@[-\w.]+):(\/\/)?(.*?)(\.git)?(\/?|\#[-\d\w._]+?)$" + return bool(re.match(pattern, url)) + + +def drop_privileges(uid_name="nobody", gid_name="nogroup"): + import grp + import pwd + + # from http://stackoverflow.com/a/2699996 + if os.getuid() != 0: + # We're not root so, like, whatever dude + return + + # 
Get the uid/gid from the name + running_uid = pwd.getpwnam(uid_name).pw_uid + running_gid = grp.getgrnam(gid_name).gr_gid + + # Remove group privileges + os.setgroups([]) + + # Try setting the new uid/gid + os.setgid(running_gid) + os.setuid(running_uid) + + # Ensure a very conservative umask + os.umask(0o22) + + +def get_available_folder_name(name: str, path: str) -> str: + """Subfixes the passed name with -1 uptil -100 whatever's available""" + if os.path.exists(os.path.join(path, name)): + for num in range(1, 100): + _dt = f"{name}_{num}" + if not os.path.exists(os.path.join(path, _dt)): + return _dt + return name + + +def get_traceback() -> str: + """Returns the traceback of the Exception""" + from traceback import format_exception + + exc_type, exc_value, exc_tb = sys.exc_info() + + if not any([exc_type, exc_value, exc_tb]): + return "" + + trace_list = format_exception(exc_type, exc_value, exc_tb) + return "".join(trace_list) + + +class _dict(dict): + """dict like object that exposes keys as attributes""" + + # bench port of xhiveframework._dict + def __getattr__(self, key): + ret = self.get(key) + # "__deepcopy__" exception added to fix xhiveframework#14833 via DFP + if not ret and key.startswith("__") and key != "__deepcopy__": + raise AttributeError() + return ret + + def __setattr__(self, key, value): + self[key] = value + + def __getstate__(self): + return self + + def __setstate__(self, d): + self.update(d) + + def update(self, d): + """update and return self -- the missing dict feature in python""" + super().update(d) + return self + + def copy(self): + return _dict(dict(self).copy()) + + +def get_cmd_from_sysargv(): + """Identify and segregate tokens to options and command + + For Command: `bench --profile --site xhiveframework.com migrate --no-backup` + sys.argv: ["/home/xhiveframework/.local/bin/bench", "--profile", "--site", "xhiveframework.com", "migrate", "--no-backup"] + Actual command run: migrate + + """ + # context is passed as options to 
xhiveframework's bench_helper + from bench.bench import Bench + + xhiveframework_context = _dict(params={"--site"}, flags={"--verbose", "--profile", "--force"}) + cmd_from_ctx = None + sys_argv = sys.argv[1:] + skip_next = False + + for arg in sys_argv: + if skip_next: + skip_next = False + continue + + if arg in xhiveframework_context.flags: + continue + + elif arg in xhiveframework_context.params: + skip_next = True + continue + + if sys_argv.index(arg) == 0 and arg in Bench(".").apps: + continue + + cmd_from_ctx = arg + + break + + return cmd_from_ctx + + +def get_app_cache_extract_filter( + count_threshold: int = 10_000, + size_threshold: int = 1_000_000_000, +): # -> Callable[[TarInfo, str], TarInfo | None] + state = dict(count=0, size=0) + + AbsoluteLinkError = Exception + def data_filter(m: TarInfo, _:str) -> TarInfo: + return m + + if (sys.version_info.major == 3 and sys.version_info.minor > 7) or sys.version_info.major > 3: + from tarfile import data_filter, AbsoluteLinkError + + def filter_function(member: TarInfo, dest_path: str) -> Optional[TarInfo]: + state["count"] += 1 + state["size"] += member.size + + if state["count"] > count_threshold: + raise RuntimeError(f"Number of entries exceeds threshold ({state['count']})") + + if state["size"] > size_threshold: + raise RuntimeError(f"Extracted size exceeds threshold ({state['size']})") + + try: + return data_filter(member, dest_path) + except AbsoluteLinkError: + # Links created by `xhiveframework` after extraction + return None + + return filter_function diff --git a/bench/utils/app.py b/bench/utils/app.py new file mode 100644 index 0000000..7c27f84 --- /dev/null +++ b/bench/utils/app.py @@ -0,0 +1,305 @@ +# imports - standard imports +import os +import pathlib +import re +import sys +import subprocess +from typing import List, Optional +from functools import lru_cache + +# imports - module imports +from bench.exceptions import ( + InvalidRemoteException, + InvalidBranchException, + CommandFailedError, + 
VersionNotFound, +) +from bench.app import get_repo_dir + + +def is_version_upgrade(app="xhiveframework", bench_path=".", branch=None): + upstream_version = get_upstream_version(app=app, branch=branch, bench_path=bench_path) + + if not upstream_version: + raise InvalidBranchException( + f"Specified branch of app {app} is not in upstream remote" + ) + + local_version = get_major_version(get_current_version(app, bench_path=bench_path)) + upstream_version = get_major_version(upstream_version) + + if upstream_version > local_version: + return (True, local_version, upstream_version) + + return (False, local_version, upstream_version) + + +def switch_branch(branch, apps=None, bench_path=".", upgrade=False, check_upgrade=True): + import git + from bench.bench import Bench + from bench.utils import log, exec_cmd + from bench.utils.bench import ( + build_assets, + patch_sites, + post_upgrade, + ) + from bench.utils.system import backup_all_sites + + apps_dir = os.path.join(bench_path, "apps") + version_upgrade = (False,) + switched_apps = [] + + if not apps: + apps = [ + name for name in os.listdir(apps_dir) if os.path.isdir(os.path.join(apps_dir, name)) + ] + + for app in apps: + app_dir = os.path.join(apps_dir, app) + + if not os.path.exists(app_dir): + log(f"{app} does not exist!", level=2) + continue + + repo = git.Repo(app_dir) + unshallow_flag = os.path.exists(os.path.join(app_dir, ".git", "shallow")) + log(f"Fetching upstream {'unshallow ' if unshallow_flag else ''}for {app}") + + exec_cmd("git remote set-branches upstream '*'", cwd=app_dir) + exec_cmd( + f"git fetch --all{' --unshallow' if unshallow_flag else ''} --quiet", cwd=app_dir + ) + + if check_upgrade: + version_upgrade = is_version_upgrade(app=app, bench_path=bench_path, branch=branch) + if version_upgrade[0] and not upgrade: + log( + f"Switching to {branch} will cause upgrade from" + f" {version_upgrade[1]} to {version_upgrade[2]}. 
Pass --upgrade to" + " confirm", + level=2, + ) + sys.exit(1) + + print("Switching for " + app) + exec_cmd(f"git checkout -f {branch}", cwd=app_dir) + + if str(repo.active_branch) == branch: + switched_apps.append(app) + else: + log(f"Switching branches failed for: {app}", level=2) + + if switched_apps: + log(f"Successfully switched branches for: {', '.join(switched_apps)}", level=1) + print( + "Please run `bench update --patch` to be safe from any differences in" + " database schema" + ) + + if version_upgrade[0] and upgrade: + Bench(bench_path).setup.requirements() + backup_all_sites() + patch_sites() + build_assets() + post_upgrade(version_upgrade[1], version_upgrade[2]) + + +def switch_to_branch(branch=None, apps=None, bench_path=".", upgrade=False): + switch_branch(branch, apps=apps, bench_path=bench_path, upgrade=upgrade) + + +def switch_to_develop(apps=None, bench_path=".", upgrade=True): + switch_branch("develop", apps=apps, bench_path=bench_path, upgrade=upgrade) + + +def get_version_from_string(contents, field="__version__"): + match = re.search( + r"^(\s*%s\s*=\s*['\\\"])(.+?)(['\"])" % field, contents, flags=(re.S | re.M) + ) + if not match: + raise VersionNotFound(f"{contents} is not a valid version") + return match.group(2) + + +def get_major_version(version): + import semantic_version + + return semantic_version.Version(version).major + + +def get_develop_version(app, bench_path="."): + repo_dir = get_repo_dir(app, bench_path=bench_path) + with open(os.path.join(repo_dir, os.path.basename(repo_dir), "hooks.py")) as f: + return get_version_from_string(f.read(), field="develop_version") + + +def get_upstream_version(app, branch=None, bench_path="."): + repo_dir = get_repo_dir(app, bench_path=bench_path) + if not branch: + branch = get_current_branch(app, bench_path=bench_path) + + try: + subprocess.call( + f"git fetch --depth=1 --no-tags upstream {branch}", shell=True, cwd=repo_dir + ) + except CommandFailedError: + raise 
InvalidRemoteException(f"Failed to fetch from remote named upstream for {app}") + + try: + contents = subprocess.check_output( + f"git show upstream/{branch}:{app}/__init__.py", + shell=True, + cwd=repo_dir, + stderr=subprocess.STDOUT, + ) + contents = contents.decode("utf-8") + except subprocess.CalledProcessError as e: + if b"Invalid object" in e.output: + return None + else: + raise + return get_version_from_string(contents) + + +def get_current_xhiveframework_version(bench_path="."): + try: + return get_major_version(get_current_version("xhiveframework", bench_path=bench_path)) + except OSError: + return 0 + + +def get_current_branch(app, bench_path="."): + from bench.utils import get_cmd_output + + repo_dir = get_repo_dir(app, bench_path=bench_path) + return get_cmd_output("basename $(git symbolic-ref -q HEAD)", cwd=repo_dir) + + +@lru_cache(maxsize=5) +def get_required_deps(org, name, branch, deps="hooks.py"): + import requests + import base64 + + git_api_url = f"https://api.github.com/repos/{org}/{name}/contents/{name}/{deps}" + params = {"ref": branch or "develop"} + res = requests.get(url=git_api_url, params=params).json() + + if "message" in res: + git_url = ( + f"https://raw.githubusercontent.com/{org}/{name}/{params['ref']}/{name}/{deps}" + ) + return requests.get(git_url).text + + return base64.decodebytes(res["content"].encode()).decode() + + +def required_apps_from_hooks(required_deps: str, local: bool = False) -> List: + import ast + + required_apps_re = re.compile(r"required_apps\s+=\s+(.*)") + + if local: + required_deps = pathlib.Path(required_deps).read_text() + + _req_apps_tag = required_apps_re.search(required_deps) + req_apps_tag = _req_apps_tag[1] + return ast.literal_eval(req_apps_tag) + + +def get_remote(app, bench_path="."): + repo_dir = get_repo_dir(app, bench_path=bench_path) + contents = subprocess.check_output( + ["git", "remote", "-v"], cwd=repo_dir, stderr=subprocess.STDOUT + ) + contents = contents.decode("utf-8") + if 
re.findall(r"upstream[\s]+", contents): + return "upstream" + elif not contents: + # if contents is an empty string => remote doesn't exist + return False + else: + # get the first remote + return contents.splitlines()[0].split()[0] + + +def get_app_name(bench_path: str, folder_name: str) -> str: + """Retrieves `name` attribute of app - equivalent to distribution name + of python package. Fetches from pyproject.toml, setup.cfg or setup.py + whichever defines it in that order. + """ + app_name = None + apps_path = os.path.join(os.path.abspath(bench_path), "apps") + + config_py_path = os.path.join(apps_path, folder_name, "setup.cfg") + setup_py_path = os.path.join(apps_path, folder_name, "setup.py") + + pyproject_path = os.path.join(apps_path, folder_name, "pyproject.toml") + pyproject = get_pyproject(pyproject_path) + if pyproject: + app_name = pyproject.get("project", {}).get("name") + + if not app_name and os.path.exists(config_py_path): + from setuptools.config import read_configuration + + config = read_configuration(config_py_path) + app_name = config.get("metadata", {}).get("name") + + if not app_name: + # retrieve app name from setup.py as fallback + with open(setup_py_path, "rb") as f: + app_name = re.search(r'name\s*=\s*[\'"](.*)[\'"]', f.read().decode("utf-8"))[1] + + if app_name and folder_name != app_name: + os.rename(os.path.join(apps_path, folder_name), os.path.join(apps_path, app_name)) + return app_name + + return folder_name + + +def get_pyproject(pyproject_path: str) -> Optional[dict]: + if not os.path.exists(pyproject_path): + return None + + try: + from tomli import load + except ImportError: + from tomllib import load + + with open(pyproject_path, "rb") as f: + return load(f) + + +def check_existing_dir(bench_path, repo_name): + cloned_path = os.path.join(bench_path, "apps", repo_name) + dir_already_exists = os.path.isdir(cloned_path) + return dir_already_exists, cloned_path + + +def get_current_version(app, bench_path="."): + current_version = 
None + repo_dir = get_repo_dir(app, bench_path=bench_path) + pyproject_path = os.path.join(repo_dir, "pyproject.toml") + config_path = os.path.join(repo_dir, "setup.cfg") + init_path = os.path.join(repo_dir, os.path.basename(repo_dir), "__init__.py") + setup_path = os.path.join(repo_dir, "setup.py") + + try: + pyproject = get_pyproject(pyproject_path) + if pyproject: + current_version = pyproject.get("project", {}).get("version") + + if not current_version and os.path.exists(config_path): + from setuptools.config import read_configuration + + config = read_configuration(config_path) + current_version = config.get("metadata", {}).get("version") + if not current_version: + with open(init_path) as f: + current_version = get_version_from_string(f.read()) + + except (AttributeError, VersionNotFound): + # backward compatibility + with open(setup_path) as f: + current_version = get_version_from_string(f.read(), field="version") + + return current_version diff --git a/bench/utils/bench.py b/bench/utils/bench.py new file mode 100644 index 0000000..c9f291e --- /dev/null +++ b/bench/utils/bench.py @@ -0,0 +1,772 @@ +# imports - standard imports +import contextlib +import json +import logging +import os +import re +import shutil +import subprocess +import sys +from functools import lru_cache +from glob import glob +from json.decoder import JSONDecodeError +from pathlib import Path + +# imports - third party imports +import click + +# imports - module imports +import bench +from bench.exceptions import PatchError, ValidationError +from bench.utils import ( + exec_cmd, + get_bench_cache_path, + get_bench_name, + get_cmd_output, + log, + which, +) + +logger = logging.getLogger(bench.PROJECT_NAME) + + +@lru_cache(maxsize=None) +def get_env_cmd(cmd: str, bench_path: str = ".") -> str: + exact_location = os.path.abspath( + os.path.join(bench_path, "env", "bin", cmd.strip("*")) + ) + if os.path.exists(exact_location): + return exact_location + + # this supports envs' generated by 
patched virtualenv or venv (which may cause an extra 'local' folder to be created) + existing_python_bins = glob( + os.path.join(bench_path, "env", "**", "bin", cmd), recursive=True + ) + + if existing_python_bins: + return os.path.abspath(existing_python_bins[0]) + + return exact_location + + +def get_venv_path(verbose=False, python="python3"): + with open(os.devnull, "wb") as devnull: + is_venv_installed = not subprocess.call( + [python, "-m", "venv", "--help"], stdout=devnull + ) + if is_venv_installed: + return f"{python} -m venv" + else: + log("venv cannot be found", level=2) + + +def update_node_packages(bench_path=".", apps=None, verbose=None): + print("Updating node packages...") + from distutils.version import LooseVersion + + from bench.utils.app import get_develop_version + + v = LooseVersion(get_develop_version("xhiveframework", bench_path=bench_path)) + + # After rollup was merged, xhiveframework_version = 10.1 + # if develop_verion is 11 and up, only then install yarn + if v < LooseVersion("11.x.x-develop"): + update_npm_packages(bench_path, apps=apps, verbose=verbose) + else: + update_yarn_packages(bench_path, apps=apps, verbose=verbose) + + +def install_python_dev_dependencies(bench_path=".", apps=None, verbose=False): + import bench.cli + from bench.bench import Bench + + verbose = bench.cli.verbose or verbose + quiet_flag = "" if verbose else "--quiet" + + bench = Bench(bench_path) + + if isinstance(apps, str): + apps = [apps] + elif not apps: + apps = bench.get_installed_apps() + + for app in apps: + pyproject_deps = None + app_path = os.path.join(bench_path, "apps", app) + pyproject_path = os.path.join(app_path, "pyproject.toml") + dev_requirements_path = os.path.join(app_path, "dev-requirements.txt") + + if os.path.exists(pyproject_path): + pyproject_deps = _generate_dev_deps_pattern(pyproject_path) + if pyproject_deps: + bench.run(f"{bench.python} -m pip install {quiet_flag} --upgrade {pyproject_deps}") + + if not pyproject_deps and 
os.path.exists(dev_requirements_path): + bench.run( + f"{bench.python} -m pip install {quiet_flag} --upgrade -r {dev_requirements_path}" + ) + + +def _generate_dev_deps_pattern(pyproject_path): + try: + from tomli import loads + except ImportError: + from tomllib import loads + + requirements_pattern = "" + pyroject_config = loads(open(pyproject_path).read()) + + with contextlib.suppress(KeyError): + for pkg, version in pyroject_config["tool"]["bench"]["dev-dependencies"].items(): + op = "==" if "=" not in version else "" + requirements_pattern += f"{pkg}{op}{version} " + return requirements_pattern + + +def update_yarn_packages(bench_path=".", apps=None, verbose=None): + import bench.cli as bench_cli + from bench.bench import Bench + + verbose = bench_cli.verbose or verbose + bench = Bench(bench_path) + apps = apps or bench.apps + apps_dir = os.path.join(bench.name, "apps") + + # TODO: Check for stuff like this early on only?? + if not which("yarn"): + print("Please install yarn using below command and try again.") + print("`npm install -g yarn`") + return + + for app in apps: + app_path = os.path.join(apps_dir, app) + if os.path.exists(os.path.join(app_path, "package.json")): + click.secho(f"\nInstalling node dependencies for {app}", fg="yellow") + yarn_install = "yarn install --check-files" + if verbose: + yarn_install += " --verbose" + bench.run(yarn_install, cwd=app_path) + + +def update_npm_packages(bench_path=".", apps=None, verbose=None): + verbose = bench.cli.verbose or verbose + npm_install = "npm install --verbose" if verbose else "npm install" + apps_dir = os.path.join(bench_path, "apps") + package_json = {} + + if not apps: + apps = os.listdir(apps_dir) + + for app in apps: + package_json_path = os.path.join(apps_dir, app, "package.json") + + if os.path.exists(package_json_path): + with open(package_json_path) as f: + app_package_json = json.loads(f.read()) + # package.json is usually a dict in a dict + for key, value in app_package_json.items(): + if 
key not in package_json: + package_json[key] = value + else: + if isinstance(value, dict): + package_json[key].update(value) + elif isinstance(value, list): + package_json[key].extend(value) + else: + package_json[key] = value + + if package_json == {}: + with open(os.path.join(os.path.dirname(__file__), "package.json")) as f: + package_json = json.loads(f.read()) + + with open(os.path.join(bench_path, "package.json"), "w") as f: + f.write(json.dumps(package_json, indent=1, sort_keys=True)) + + exec_cmd(npm_install, cwd=bench_path) + + +def migrate_env(python, backup=False): + import shutil + from urllib.parse import urlparse + + from bench.bench import Bench + + bench = Bench(".") + nvenv = "env" + path = os.getcwd() + python = which(python) + pvenv = os.path.join(path, nvenv) + + if python.startswith(pvenv): + # The supplied python version is in active virtualenv which we are about to nuke. + click.secho( + "Python version supplied is present in currently sourced virtual environment.\n" + "`deactiviate` the current virtual environment before migrating environments.", + fg="yellow", + ) + sys.exit(1) + + # Clear Cache before Bench Dies. 
	# Best-effort flush of Redis cache/DB before the env is replaced; never
	# fatal — a warning is logged if Redis is unreachable.
	try:
		config = bench.conf
		rredis = urlparse(config["redis_cache"])
		redis = f"{which('redis-cli')} -p {rredis.port}"

		logger.log("Clearing Redis Cache...")
		exec_cmd(f"{redis} FLUSHALL")
		logger.log("Clearing Redis DataBase...")
		exec_cmd(f"{redis} FLUSHDB")
	except Exception:
		logger.warning("Please ensure Redis Connections are running or Daemonized.")

	# Backup venv: restore using `virtualenv --relocatable` if needed
	if backup:
		from datetime import datetime

		parch = os.path.join(path, "archived", "envs")
		os.makedirs(parch, exist_ok=True)

		source = os.path.join(path, "env")
		target = parch

		logger.log("Backing up Virtual Environment")
		# Timestamped name, then moved under archived/envs.
		stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
		dest = os.path.join(path, str(stamp))

		os.rename(source, dest)
		shutil.move(dest, target)

	# Create virtualenv using specified python
	def _install_app(app):
		# Editable install of the app into the freshly created env.
		app_path = f"-e {os.path.join('apps', app)}"
		exec_cmd(f"{pvenv}/bin/python -m pip install --upgrade {app_path}")

	try:
		logger.log(f"Setting up a New Virtual {python} Environment")
		exec_cmd(f"{python} -m venv {pvenv}")

		# Install xhiveframework first
		_install_app("xhiveframework")
		for app in bench.apps:
			if str(app) != "xhiveframework":
				_install_app(app)

		logger.log(f"Migration Successful to {python}")
	except Exception:
		logger.warning("Python env migration Error", exc_info=True)
		raise


def validate_upgrade(from_ver, to_ver, bench_path="."):
	"""Raise when upgrading to version 6+ without any Node.js runtime
	(npm/node/nodejs) available on PATH."""
	if to_ver >= 6 and not which("npm") and not which("node") and not which("nodejs"):
		raise Exception("Please install nodejs and npm")


def post_upgrade(from_ver, to_ver, bench_path="."):
	"""Post-update hook: announce the upgraded version and, on production
	benches (``restart_supervisor_on_update`` set), regenerate service
	configuration."""
	from bench.bench import Bench
	from bench.config import redis
	from bench.config.nginx import make_nginx_conf
	from bench.config.supervisor import generate_supervisor_config

	conf = Bench(bench_path).conf
	print("-" * 80 + f"Your bench was upgraded to version {to_ver}")

	if conf.get("restart_supervisor_on_update"):
redis.generate_config(bench_path=bench_path) + generate_supervisor_config(bench_path=bench_path) + make_nginx_conf(bench_path=bench_path) + print( + "As you have setup your bench for production, you will have to reload" + " configuration for nginx and supervisor. To complete the migration, please" + " run the following commands:\nsudo service nginx restart\nsudo" + " supervisorctl reload" + ) + + +def patch_sites(bench_path="."): + from bench.bench import Bench + from bench.utils.system import migrate_site + + bench = Bench(bench_path) + + for site in bench.sites: + try: + migrate_site(site, bench_path=bench_path) + except subprocess.CalledProcessError: + raise PatchError + + +def restart_supervisor_processes(bench_path=".", web_workers=False, _raise=False): + from bench.bench import Bench + + bench = Bench(bench_path) + conf = bench.conf + cmd = conf.get("supervisor_restart_cmd") + bench_name = get_bench_name(bench_path) + + if cmd: + bench.run(cmd, _raise=_raise) + + else: + sudo = "" + try: + supervisor_status = get_cmd_output("supervisorctl status", cwd=bench_path) + except subprocess.CalledProcessError as e: + if e.returncode == 127: + log("restart failed: Couldn't find supervisorctl in PATH", level=3) + return + sudo = "sudo " + supervisor_status = get_cmd_output("sudo supervisorctl status", cwd=bench_path) + + if not sudo and ( + "error: , [Errno 13] Permission denied" in supervisor_status + ): + sudo = "sudo " + supervisor_status = get_cmd_output("sudo supervisorctl status", cwd=bench_path) + + if web_workers and f"{bench_name}-web:" in supervisor_status: + groups = [f"{bench_name}-web:\t"] + + elif f"{bench_name}-workers:" in supervisor_status: + groups = [f"{bench_name}-web:", f"{bench_name}-workers:"] + + # backward compatibility + elif f"{bench_name}-processes:" in supervisor_status: + groups = [f"{bench_name}-processes:"] + + # backward compatibility + else: + groups = ["xhiveframework:"] + + for group in groups: + failure = 
bench.run(f"{sudo}supervisorctl restart {group}", _raise=_raise) + if failure: + log( + f"restarting supervisor group `{group}` failed. Use `bench restart` to retry.", + level=3, + ) + + +def restart_systemd_processes(bench_path=".", web_workers=False, _raise=True): + bench_name = get_bench_name(bench_path) + exec_cmd( + f"sudo systemctl stop -- $(systemctl show -p Requires {bench_name}.target | cut" + " -d= -f2)", + _raise=_raise, + ) + exec_cmd( + f"sudo systemctl start -- $(systemctl show -p Requires {bench_name}.target |" + " cut -d= -f2)", + _raise=_raise, + ) + + +def restart_process_manager(bench_path=".", web_workers=False): + # only overmind has the restart feature, not sure other supported procmans do + if which("overmind") and os.path.exists(os.path.join(bench_path, ".overmind.sock")): + worker = "web" if web_workers else "" + exec_cmd(f"overmind restart {worker}", cwd=bench_path) + + +def build_assets(bench_path=".", app=None, using_cached=False): + command = "bench build" + if app: + command += f" --app {app}" + + env = {"BENCH_DEVELOPER": "1"} + if using_cached: + env["USING_CACHED"] = "1" + + exec_cmd(command, cwd=bench_path, env=env) + + +def handle_version_upgrade(version_upgrade, bench_path, force, reset, conf): + from bench.utils import log, pause_exec + + if version_upgrade[0]: + if force: + log( + """Force flag has been used for a major version change in Xhiveframework and it's apps. +This will take significant time to migrate and might break custom apps.""", + level=3, + ) + else: + print( + f"""This update will cause a major version change in Xhiveframework/XhiveERP from {version_upgrade[1]} to {version_upgrade[2]}. +This would take significant time to migrate and might break custom apps.""" + ) + click.confirm("Do you want to continue?", abort=True) + + if not reset and conf.get("shallow_clone"): + log( + """shallow_clone is set in your bench config. +However without passing the --reset flag, your repositories will be unshallowed. 
def update(
	pull: bool = False,
	apps: str = None,
	patch: bool = False,
	build: bool = False,
	requirements: bool = False,
	backup: bool = True,
	compile: bool = True,
	force: bool = False,
	reset: bool = False,
	restart_supervisor: bool = False,
	restart_systemd: bool = False,
):
	"""command: bench update

	Backup, pull, install requirements, patch, build and reload the bench.
	When none of the task flags (pull/patch/build/requirements) are given,
	all of them are enabled. Sites are put into maintenance mode for the
	duration of the update.
	"""
	import re

	from bench import patches
	from bench.app import pull_apps
	from bench.bench import Bench
	from bench.config.common_site_config import update_config
	from bench.exceptions import CannotUpdateReleaseBench
	from bench.utils.app import is_version_upgrade
	from bench.utils.system import backup_all_sites

	bench_path = os.path.abspath(".")
	bench = Bench(bench_path)
	patches.run(bench_path=bench_path)
	conf = bench.conf

	if conf.get("release_bench"):
		raise CannotUpdateReleaseBench("Release bench detected, cannot update!")

	# No explicit task flag means "do everything".
	if not (pull or patch or build or requirements):
		pull = patch = build = requirements = True

	# --apps only applies to the pull stage; accept comma or space separators.
	if apps and pull:
		apps = [app.strip() for app in re.split(",| ", apps) if app]
	else:
		apps = []

	validate_branch()

	version_upgrade = is_version_upgrade()
	handle_version_upgrade(version_upgrade, bench_path, force, reset, conf)

	# Put sites into maintenance while the update runs.
	conf.update({"maintenance_mode": 1, "pause_scheduler": 1})
	update_config(conf, bench_path=bench_path)

	if backup:
		print("Backing up sites...")
		backup_all_sites(bench_path=bench_path)

	if pull:
		print("Updating apps source...")
		pull_apps(apps=apps, bench_path=bench_path, reset=reset)

	if requirements:
		print("Setting up requirements...")
		bench.setup.requirements()

	if patch:
		print("Patching sites...")
		patch_sites(bench_path=bench_path)

	if build:
		print("Building assets...")
		bench.build()

	if version_upgrade[0] or (not version_upgrade[0] and force):
		post_upgrade(version_upgrade[1], version_upgrade[2], bench_path=bench_path)

	bench.reload(web=False, supervisor=restart_supervisor, systemd=restart_systemd)

	# Lift maintenance mode now that all stages finished.
	conf.update({"maintenance_mode": 0, "pause_scheduler": 0})
	update_config(conf, bench_path=bench_path)

	print(
		"_" * 80 + "\nBench: Deployment tool for Xhiveframework and Xhiveframework Applications"
		" (https://lab.membtech.com/xhiveframework/bench_new.git).\nOpen source depends on your contributions, so do"
		" give back by submitting bug reports, patches and fixes and be a part of the"
		" community :)"
	)
"reset", "--hard"], cwd=app_path) + subprocess.check_output(["git", "pull", "--rebase", remote, branch], cwd=app_path) + + install_app(app, bench_path, restart_bench=False) + + with open(os.path.join(clone_from, "sites", "apps.txt")) as f: + apps = f.read().splitlines() + + for app in apps: + setup_app(app) + + +def remove_backups_crontab(bench_path="."): + from crontab import CronTab + + from bench.bench import Bench + + logger.log("removing backup cronjob") + + bench_dir = os.path.abspath(bench_path) + user = Bench(bench_dir).conf.get("xhiveframework_user") + logfile = os.path.join(bench_dir, "logs", "backup.log") + system_crontab = CronTab(user=user) + backup_command = f"cd {bench_dir} && {sys.argv[0]} --verbose --site all backup" + job_command = f"{backup_command} >> {logfile} 2>&1" + + system_crontab.remove_all(command=job_command) + + +def set_mariadb_host(host, bench_path="."): + update_common_site_config({"db_host": host}, bench_path=bench_path) + + +def set_redis_cache_host(host, bench_path="."): + update_common_site_config({"redis_cache": f"redis://{host}"}, bench_path=bench_path) + + +def set_redis_queue_host(host, bench_path="."): + update_common_site_config({"redis_queue": f"redis://{host}"}, bench_path=bench_path) + + +def set_redis_socketio_host(host, bench_path="."): + update_common_site_config({"redis_socketio": f"redis://{host}"}, bench_path=bench_path) + + +def update_common_site_config(ddict, bench_path="."): + filename = os.path.join(bench_path, "sites", "common_site_config.json") + + if os.path.exists(filename): + with open(filename) as f: + content = json.load(f) + + else: + content = {} + + content.update(ddict) + with open(filename, "w") as f: + json.dump(content, f, indent=1, sort_keys=True) + + +def validate_app_installed_on_sites(app, bench_path="."): + print("Checking if app installed on active sites...") + ret = check_app_installed(app, bench_path=bench_path) + + if ret is None: + check_app_installed_legacy(app, bench_path=bench_path) 
+ else: + return ret + + +def check_app_installed(app, bench_path="."): + try: + out = subprocess.check_output( + ["bench", "--site", "all", "list-apps", "--format", "json"], + stderr=open(os.devnull, "wb"), + cwd=bench_path, + ).decode("utf-8") + except subprocess.CalledProcessError: + return None + + try: + apps_sites_dict = json.loads(out) + except JSONDecodeError: + return None + + for site, apps in apps_sites_dict.items(): + if app in apps: + raise ValidationError(f"Cannot remove, app is installed on site: {site}") + + +def check_app_installed_legacy(app, bench_path="."): + site_path = os.path.join(bench_path, "sites") + + for site in os.listdir(site_path): + req_file = os.path.join(site_path, site, "site_config.json") + if os.path.exists(req_file): + out = subprocess.check_output( + ["bench", "--site", site, "list-apps"], cwd=bench_path + ).decode("utf-8") + if re.search(r"\b" + app + r"\b", out): + print(f"Cannot remove, app is installed on site: {site}") + sys.exit(1) + + +def validate_branch(): + from bench.bench import Bench + from bench.utils.app import get_current_branch + + apps = Bench(".").apps + + installed_apps = set(apps) + check_apps = {"xhiveframework", "xhiveerp"} + intersection_apps = installed_apps.intersection(check_apps) + + for app in intersection_apps: + branch = get_current_branch(app) + + if branch == "master": + print( + """'master' branch is renamed to 'version-11' since 'version-12' release. +As of January 2020, the following branches are +version Xhiveframework XhiveERP +11 version-11 version-11 +12 version-12 version-12 +13 version-13 version-13 +14 develop develop + +Please switch to new branches to get future updates. 
def cache_helper(clear=False, remove_app="", remove_key="") -> None:
	"""Dispatch the `bench cache` sub-actions: list, remove or clear.

	With no flags at all the cache contents are listed; removal takes
	precedence over clearing when both are requested.
	"""
	if remove_key or remove_app:
		cache_remove(remove_app, remove_key)
	elif clear:
		cache_clear()
	else:
		cache_list()


def cache_list() -> None:
	"""Print a table of cached app archives with size and timestamps."""
	from datetime import datetime

	total_bytes = 0
	total_count = 0
	header_shown = False

	for entry in get_bench_cache_path("apps").iterdir():
		if entry.suffix not in [".tar", ".tgz"]:
			continue

		info = entry.stat()
		megabytes = info.st_size / 1_000_000
		created_at = datetime.fromtimestamp(info.st_ctime)
		accessed_at = datetime.fromtimestamp(info.st_atime)

		app_name = entry.name.split("-")[0]
		total_count += 1
		total_bytes += info.st_size
		is_compressed = entry.suffix == ".tgz"

		if not header_shown:
			click.echo(
				f"{'APP':15} "
				f"{'FILE':25} "
				f"{'SIZE':>13} "
				f"{'COMPRESSED'} "
				f"{'CREATED':19} "
				f"{'ACCESSED':19} "
			)
			header_shown = True

		click.echo(
			f"{app_name:15} "
			f"{entry.name:25} "
			f"{megabytes:10.3f} MB "
			f"{str(is_compressed):10} "
			f"{created_at:%Y-%m-%d %H:%M:%S} "
			f"{accessed_at:%Y-%m-%d %H:%M:%S} "
		)

	if total_count:
		click.echo(f"Total size {total_bytes / 1_000_000:.3f} MB belonging to {total_count} items")
	else:
		click.echo("No cached items")


def cache_remove(app: str = "", key: str = "") -> None:
	"""Delete cached archives matching *app* and/or *key* and report totals."""
	removed_count = 0
	removed_bytes = 0

	for entry in get_bench_cache_path("apps").iterdir():
		if not should_remove_item(entry, app, key):
			continue

		removed_count += 1
		removed_bytes += entry.stat().st_size
		entry.unlink(missing_ok=True)
		click.echo(f"Removed {entry.name}")

	if removed_count:
		click.echo(f"Cleared {removed_bytes / 1_000_000:.3f} MB belonging to {removed_count} items")
	else:
		click.echo("No items removed")
def cache_clear() -> None:
	"""Delete the entire bench app-cache directory and report what was freed."""
	cache_path = get_bench_cache_path("apps")
	cached_count = len(os.listdir(cache_path))
	if not cached_count:
		click.echo("No cached items")
		return

	freed_bytes = get_dir_size(cache_path)
	shutil.rmtree(cache_path)

	# cached_count is known non-zero here; the original re-check was dead code.
	click.echo(f"Cleared {freed_bytes / 1_000_000:.3f} MB belonging to {cached_count} items")


def get_dir_size(p: Path) -> int:
	"""Return the total size in bytes of the immediate children of *p*.

	Non-recursive. Uses lstat() so symlinks are counted by the link itself,
	not its target — and unlike Path.stat(follow_symlinks=False), lstat()
	also works on Python versions before 3.10.
	"""
	return sum(child.lstat().st_size for child in p.iterdir())
+ ) + super().__init__(*args, **kwargs) + + def handle_parse_result(self, ctx, opts, args): + current_opt = self.name in opts + if current_opt and self.only_if_set: + for opt in self.only_if_set: + if opt not in opts: + deafaults_set = [x.default for x in ctx.command.params if x.name == opt] + if not deafaults_set: + raise click.UsageError(f"Illegal Usage: Set '{opt}' before '{self.name}'.") + + return super().handle_parse_result(ctx, opts, args) + + +def use_experimental_feature(ctx, param, value): + if not value: + return + + if value == "dynamic-feed": + import bench.cli + + bench.cli.dynamic_feed = True + bench.cli.verbose = True + else: + from bench.exceptions import FeatureDoesNotExistError + + raise FeatureDoesNotExistError(f"Feature {value} does not exist") + + from bench.cli import is_envvar_warn_set + + if is_envvar_warn_set: + return + + click.secho( + "WARNING: bench is using it's new CLI rendering engine. This behaviour has" + f" been enabled by passing --{value} in the command. 
This feature is" + " experimental and may not be implemented for all commands yet.", + fg="yellow", + ) + + +def setup_verbosity(ctx, param, value): + if not value: + return + + import bench.cli + + bench.cli.verbose = True diff --git a/bench/utils/render.py b/bench/utils/render.py new file mode 100644 index 0000000..155aa75 --- /dev/null +++ b/bench/utils/render.py @@ -0,0 +1,130 @@ +# imports - standard imports +import sys +from io import StringIO + +# imports - third party imports +import click + +# imports - module imports +import bench + + +class Capturing(list): + """ + Util to consume the stdout encompassed in it and push it to a list + + with Capturing() as output: + subprocess.check_output("ls", shell=True) + + print(output) + # ["b'Applications\\nDesktop\\nDocuments\\nDownloads\\n'"] + """ + + def __enter__(self): + self._stdout = sys.stdout + sys.stdout = self._stringio = StringIO() + return self + + def __exit__(self, *args): + self.extend(self._stringio.getvalue().splitlines()) + del self._stringio # free up some memory + sys.stdout = self._stdout + + +class Rendering: + def __init__(self, success, title, is_parent, args, kwargs): + import bench.cli + + self.dynamic_feed = bench.cli.from_command_line and bench.cli.dynamic_feed + + if not self.dynamic_feed: + return + + try: + self.kw = args[0].__dict__ + except Exception: + self.kw = kwargs + + self.is_parent = is_parent + self.title = title + self.success = success + + def __enter__(self, *args, **kwargs): + if not self.dynamic_feed: + return + + _prefix = click.style("⏼", fg="bright_yellow") + _hierarchy = "" if self.is_parent else " " + self._title = self.title.format(**self.kw) + click.secho(f"{_hierarchy}{_prefix} {self._title}") + + bench.LOG_BUFFER.append( + { + "message": self._title, + "prefix": _prefix, + "color": None, + "is_parent": self.is_parent, + } + ) + + def __exit__(self, *args, **kwargs): + if not self.dynamic_feed: + return + + self._prefix = click.style("✔", fg="green") + 
def _rendering_decorator(title, success, is_parent):
	"""Shared factory behind the job/step decorators.

	Wraps the decorated function in a Rendering context so its start and
	completion are shown on the dynamic CLI feed.
	"""

	def innfn(fn):
		def wrapper_fn(*args, **kwargs):
			with Rendering(
				success=success,
				title=title,
				is_parent=is_parent,
				args=args,
				kwargs=kwargs,
			):
				return fn(*args, **kwargs)

		return wrapper_fn

	return innfn


def job(title: str = None, success: str = None):
	"""Supposed to be wrapped around an atomic job in a given process.
	For instance, the `get-app` command consists of two jobs: `initializing bench`
	and `fetching and installing app`.
	"""
	# Deduplicated: job and step differed only in is_parent.
	return _rendering_decorator(title, success, is_parent=True)


def step(title: str = None, success: str = None):
	"""Supposed to be wrapped around the smallest possible atomic step in a given operation.
	For instance, `building assets` is a step in the update operation.
	"""
	return _rendering_decorator(title, success, is_parent=False)
config["developer_mode"] = 1 + bench.setup.config( + redis=not skip_redis_config_generation, + procfile=not no_procfile, + additional_config=config, + ) + bench.setup.patches() + + # local apps + if clone_from: + clone_apps_from( + bench_path=path, clone_from=clone_from, update_app=not clone_without_update + ) + + # remote apps + else: + xhiveframework_path = xhiveframework_path or "https://lab.membtech.com/xhiveframework/xhiveframework15.git" + is_valid_xhiveframework_branch(xhiveframework_path=xhiveframework_path, xhiveframework_branch=xhiveframework_branch) + get_app( + xhiveframework_path, + branch=xhiveframework_branch, + bench_path=path, + skip_assets=True, + verbose=verbose, + resolve_deps=False, + ) + + # fetch remote apps using config file - deprecate this! + if apps_path: + install_apps_from_path(apps_path, bench_path=path) + + # getting app on bench init using --install-app + if install_app: + get_app( + install_app, + branch=xhiveframework_branch, + bench_path=path, + skip_assets=True, + verbose=verbose, + resolve_deps=False, + ) + + if not skip_assets: + build_assets(bench_path=path) + + if not no_backups: + bench.setup.backups() + + +def setup_sudoers(user): + from bench.config.lets_encrypt import get_certbot_path + + if not os.path.exists("/etc/sudoers.d"): + os.makedirs("/etc/sudoers.d") + + set_permissions = not os.path.exists("/etc/sudoers") + with open("/etc/sudoers", "a") as f: + f.write("\n#includedir /etc/sudoers.d\n") + + if set_permissions: + os.chmod("/etc/sudoers", 0o440) + + template = bench.config.env().get_template("xhiveframework_sudoers") + xhiveframework_sudoers = template.render( + **{ + "user": user, + "service": which("service"), + "systemctl": which("systemctl"), + "nginx": which("nginx"), + "certbot": get_certbot_path(), + } + ) + + with open(sudoers_file, "w") as f: + f.write(xhiveframework_sudoers) + + os.chmod(sudoers_file, 0o440) + log(f"Sudoers was set up for user {user}", level=1) + + +def start(no_dev=False, 
concurrency=None, procfile=None, no_prefix=False, procman=None): + program = which(procman) if procman else get_process_manager() + if not program: + raise Exception("No process manager found") + + os.environ["PYTHONUNBUFFERED"] = "true" + if not no_dev: + os.environ["DEV_SERVER"] = "true" + + command = [program, "start"] + if concurrency: + command.extend(["-c", concurrency]) + + if procfile: + command.extend(["-f", procfile]) + + if no_prefix: + command.extend(["--no-prefix"]) + + os.execv(program, command) + + +def migrate_site(site, bench_path="."): + run_xhiveframework_cmd("--site", site, "migrate", bench_path=bench_path) + + +def backup_site(site, bench_path="."): + run_xhiveframework_cmd("--site", site, "backup", bench_path=bench_path) + + +def backup_all_sites(bench_path="."): + from bench.bench import Bench + + for site in Bench(bench_path).sites: + backup_site(site, bench_path=bench_path) + + +def fix_prod_setup_perms(bench_path=".", xhiveframework_user=None): + from glob import glob + from bench.bench import Bench + + xhiveframework_user = xhiveframework_user or Bench(bench_path).conf.get("xhiveframework_user") + + if not xhiveframework_user: + print("xhiveframework user not set") + sys.exit(1) + + globs = ["logs/*", "config/*"] + for glob_name in globs: + for path in glob(glob_name): + uid = pwd.getpwnam(xhiveframework_user).pw_uid + gid = grp.getgrnam(xhiveframework_user).gr_gid + os.chown(path, uid, gid) + + +def setup_fonts(): + fonts_path = os.path.join("/tmp", "fonts") + + if os.path.exists("/etc/fonts_backup"): + return + + exec_cmd("git clone https://lab.membtech.com/xhiveframework/fonts.git", cwd="/tmp") + os.rename("/etc/fonts", "/etc/fonts_backup") + os.rename("/usr/share/fonts", "/usr/share/fonts_backup") + os.rename(os.path.join(fonts_path, "etc_fonts"), "/etc/fonts") + os.rename(os.path.join(fonts_path, "usr_share_fonts"), "/usr/share/fonts") + shutil.rmtree(fonts_path) + exec_cmd("fc-cache -fv") diff --git a/bench/utils/translation.py 
def update_translations_p(args):
	"""Multiprocessing worker: unpack an (app, lang) tuple and download it.

	HTTP errors are reported and swallowed so one failed language does not
	abort the whole pool run.
	"""
	import requests

	try:
		update_translations(*args)
	except requests.exceptions.HTTPError:
		print("Download failed for", args[0], args[1])


def download_translations_p():
	"""Download translations for all apps/languages in parallel."""
	import multiprocessing

	langs = get_langs()
	apps = ("xhiveframework", "xhiveerp")
	args = list(itertools.product(apps, langs))

	# BUGFIX: manage the pool with a context manager so worker processes
	# are reliably terminated/joined; the old code never closed the pool.
	with multiprocessing.Pool(multiprocessing.cpu_count()) as pool:
		pool.map(update_translations_p, args)


def download_translations():
	"""Download translations for all apps/languages sequentially."""
	langs = get_langs()
	apps = ("xhiveframework", "xhiveerp")
	for app, lang in itertools.product(apps, langs):
		update_translations(app, lang)


def get_langs():
	"""Return the language codes from xhiveframework's bundled languages.json.

	Path is relative to the bench root, so callers must run from there.
	"""
	lang_file = "apps/xhiveframework/xhiveframework/geo/languages.json"
	with open(lang_file) as f:
		langs = json.load(f)
	return [d["code"] for d in langs]


def update_translations(app, lang):
	"""Stream `<app>-<lang>.csv` from translate.xhiveerp.com into the app's
	translations directory. Raises requests.HTTPError on a bad response.
	"""
	import requests

	translations_dir = os.path.join("apps", app, app, "translations")
	csv_file = os.path.join(translations_dir, f"{lang}.csv")
	url = f"https://translate.xhiveerp.com/files/{app}-{lang}.csv"
	r = requests.get(url, stream=True)
	r.raise_for_status()

	with open(csv_file, "wb") as f:
		for chunk in r.iter_content(chunk_size=1024):
			# filter out keep-alive new chunks
			if chunk:
				f.write(chunk)
				f.flush()

	print("downloaded for", app, lang)
directory + cd sites + + # All xhiveframework commands are subcommands under "bench xhiveframework" + # Xhiveframework is only installed in virtualenv "env" so use appropriate python executable + COMPREPLY+=( $( COMP_WORDS="bench xhiveframework "${COMP_WORDS[@]:1} \ + COMP_CWORD=$(($COMP_CWORD+1)) \ + _BENCH_COMPLETE=complete ../env/bin/python ../apps/xhiveframework/xhiveframework/utils/bench_helper.py ) ) + + # If the word before the current cursor position in command typed so far is "--site" then only list sites + if [ ${COMP_WORDS[COMP_CWORD-1]} == "--site" ]; then + COMPREPLY=( $( ls -d ./*/site_config.json | cut -f 2 -d "/" | xargs echo ) ) + fi + + # Get out of sites directory now + cd .. + fi + return 0 +} + +# Only support bash and zsh +if [ -n "$BASH" ] ; then + complete -F _bench_completion -o default bench; +elif [ -n "$ZSH_VERSION" ]; then + # Use zsh in bash compatibility mode + autoload bashcompinit + bashcompinit + complete -F _bench_completion -o default bench; +fi diff --git a/docs/bench_custom_cmd.md b/docs/bench_custom_cmd.md new file mode 100644 index 0000000..c04b77d --- /dev/null +++ b/docs/bench_custom_cmd.md @@ -0,0 +1,47 @@ +## How are Xhiveframework Framework commands available via bench? + +bench utilizes `xhiveframework.utils.bench_manager` to get the framework's as well as those of any custom commands written in application installed in the Xhiveframework environment. Currently, with *version 12* there are commands related to the scheduler, sites, translations and other utils in Xhiveframework inherited by bench. + + +## Can I add CLI commands in my custom app and call them via bench? + +Along with the framework commands, Xhiveframework's `bench_manager` module also searches for any commands in your custom applications. Thereby, bench communicates with the respective bench's Xhiveframework which in turn checks for available commands in all of the applications. 
+ +To make your custom command available to bench, just create a `commands` module under your parent module and write the command with a click wrapper and a variable commands which contains a list of click functions, which are your own commands. The directory structure may be visualized as: + +``` +xhiveframework-bench +|──apps + |── xhiveframework + ├── custom_app + │   ├── README.md + │   ├── custom_app + │   │   ├── commands <------ commands module + │   ├── license.txt + │   ├── requirements.txt + │   └── setup.py +``` + +The commands module maybe a single file such as `commands.py` or a directory with an `__init__.py` file. For a custom application of name 'flags', example may be given as + +```python +# file_path: xhiveframework-bench/apps/flags/flags/commands.py +import click + +@click.command('set-flags') +@click.argument('state', type=click.Choice(['on', 'off'])) +def set_flags(state): + from flags.utils import set_flags + set_flags(state=state) + +commands = [ + set_flags +] +``` + +and with context of the current bench, this command maybe executed simply as + +```zsh +➜ bench set-flags +Flags are set to state: 'on' +``` diff --git a/docs/bench_usage.md b/docs/bench_usage.md new file mode 100644 index 0000000..9d9115c --- /dev/null +++ b/docs/bench_usage.md @@ -0,0 +1,201 @@ +# bench CLI Usage + +This may not be known to a lot of people but half the bench commands we're used to, exist in the Xhiveframework Framework and not in bench directly. Those commands generally are the `--site` commands. This page is concerned only with the commands in the bench project. Any framework commands won't be a part of this consolidation. 
+ + +# bench CLI Commands + +Under Click's structure, `bench` is the main command group, under which there are three main groups of commands in bench currently, namely + + - **install**: The install command group deals with commands used to install system dependencies for setting up Xhiveframework environment + + - **setup**: This command group for consists of commands used to maipulate the requirements and environments required by your Xhiveframework environment + + - **config**: The config command group deals with making changes in the current bench (not the CLI tool) configuration + + +## Using the bench command line + +```zsh +➜ bench +Usage: bench [OPTIONS] COMMAND [ARGS]... + + Bench manager for Xhiveframework + +Options: + --version + --help Show this message and exit. + +Commands: + backup Backup single site + backup-all-sites Backup all sites in current bench + config Change bench configuration + disable-production Disables production environment for the bench. + download-translations Download latest translations + exclude-app Exclude app from updating + find Finds benches recursively from location + get-app Clone an app from the internet or filesystem and... +``` + +Similarly, all available flags and options can be checked for commands individually by executing them with the `--help` flag. The `init` command for instance: + +```zsh +➜ bench init --help +Usage: bench init [OPTIONS] PATH + + Initialize a new bench instance in the specified path + +Options: + --python TEXT Path to Python Executable. + --ignore-exist Ignore if Bench instance exists. + --apps_path TEXT path to json files with apps to install + after init +``` + + + +## bench and sudo + +Some bench commands may require sudo, such as some `setup` commands and everything else under the `install` commands group. For these commands, you may not be asked for your root password if sudoers setup has been done. The security implications, well we'll talk about those soon. 
+ + + +## General Commands + +These commands belong directly to the bench group so they can be invoked directly prefixing each with `bench` in your shell. Therefore, the usage for these commands is as + +```zsh + bench COMMAND [ARGS]... +``` + +### The usual commands + + - **init**: Initialize a new bench instance in the specified path. This sets up a complete bench folder with an `apps` folder which contains all the Xhiveframework apps available in the current bench, `sites` folder that stores all site data seperated by individual site folders, `config` folder that contains your redis, NGINX and supervisor configuration files. The `env` folder consists of all python dependencies the current bench and installed Xhiveframework applications have. + - **restart**: Restart web, supervisor, systemd processes units. Used in production setup. + - **update**: If executed in a bench directory, without any flags will backup, pull, setup requirements, build, run patches and restart bench. Using specific flags will only do certain tasks instead of all. + - **migrate-env**: Migrate Virtual Environment to desired Python version. This regenerates the `env` folder with the specified Python version. + - **retry-upgrade**: Retry a failed upgrade + - **disable-production**: Disables production environment for the bench. + - **renew-lets-encrypt**: Renew Let's Encrypt certificate for site SSL. + - **backup**: Backup single site data. Can be used to backup files as well. + - **backup-all-sites**: Backup all sites in current bench. + + - **get-app**: Download an app from the internet or filesystem and set it up in your bench. This clones the git repo of the Xhiveframework project and installs it in the bench environment. + - **remove-app**: Completely remove app from bench and re-build assets if not installed on any site. + - **exclude-app**: Exclude app from updating during a `bench update` + - **include-app**: Include app for updating. 
All Xhiveframework applications are included by default when installed. + - **remote-set-url**: Set app remote url + - **remote-reset-url**: Reset app remote url to xhiveframework official + - **remote-urls**: Show apps remote url + - **switch-to-branch**: Switch all apps to specified branch, or specify apps separated by space + - **switch-to-develop**: Switch Xhiveframework and XhiveERP to develop branch + + +### A little advanced + + - **set-nginx-port**: Set NGINX port for site + - **set-ssl-certificate**: Set SSL certificate path for site + - **set-ssl-key**: Set SSL certificate private key path for site + - **set-url-root**: Set URL root for site + - **set-mariadb-host**: Set MariaDB host for bench + - **set-redis-cache-host**: Set Redis cache host for bench + - **set-redis-queue-host**: Set Redis queue host for bench + - **set-redis-socketio-host**: Set Redis socketio host for bench + - **use**: Set default site for bench + - **download-translations**: Download latest translations + + +### Developer's commands + + - **start**: Start Xhiveframework development processes. Uses the Procfile to start the Xhiveframework development environment. + - **src**: Prints bench source folder path, which can be used to cd into the bench installation repository by `cd $(bench src)`. + - **find**: Finds benches recursively from location or specified path. + - **pip**: Use the current bench's pip to manage Python packages. For help about pip usage: `bench pip help [COMMAND]` or `bench pip [COMMAND] -h`. + - **new-app**: Create a new Xhiveframework application under apps folder. + + +### Release bench + - **release**: Create a release of a Xhiveframework application + - **prepare-beta-release**: Prepare major beta release from develop branch + + + +## Setup commands + +The setup commands used for setting up the Xhiveframework environment in context of the current bench need to be executed using `bench setup` as the prefix. 
So, the general usage of these commands is as + +```zsh + bench setup COMMAND [ARGS]... +``` + + - **sudoers**: Add commands to sudoers list for allowing bench commands execution without root password + + - **env**: Setup Python virtual environment for bench. This sets up a `env` folder under the root of the bench directory. + - **redis**: Generates configuration for Redis + - **fonts**: Add Xhiveframework fonts to system + - **config**: Generate or over-write sites/common_site_config.json + - **backups**: Add cronjob for bench backups + - **socketio**: Setup node dependencies for socketio server + - **requirements**: Setup Python and Node dependencies + + - **manager**: Setup `bench-manager.local` site with the [Bench Manager](https://lab.membtech.com/xhiveframework/bench_manager) app, a GUI for bench installed on it. + + - **procfile**: Generate Procfile for bench start + + - **production**: Setup Xhiveframework production environment for specific user. This installs ansible, NGINX, supervisor, fail2ban and generates the respective configuration files. + - **nginx**: Generate configuration files for NGINX + - **fail2ban**: Setup fail2ban, an intrusion prevention software framework that protects computer servers from brute-force attacks + - **systemd**: Generate configuration for systemd + - **firewall**: Setup firewall for system + - **ssh-port**: Set SSH Port for system + - **reload-nginx**: Checks NGINX config file and reloads service + - **supervisor**: Generate configuration for supervisor + - **lets-encrypt**: Setup lets-encrypt SSL for site + - **wildcard-ssl**: Setup wildcard SSL certificate for multi-tenant bench + + - **add-domain**: Add a custom domain to a particular site + - **remove-domain**: Remove custom domain from a site + - **sync-domains**: Check if there is a change in domains. If yes, updates the domains list. 
+ + - **role**: Install dependencies via ansible roles + + + +## Config commands + +The config group commands are used for manipulating configurations in the current bench context. The usage for these commands is as + +```zsh + bench config COMMAND [ARGS]... +``` + + - **set-common-config**: Set value in common config + - **remove-common-config**: Remove specific keys from current bench's common config + + - **update_bench_on_update**: Enable/Disable bench updates on running bench update + - **restart_supervisor_on_update**: Enable/Disable auto restart of supervisor processes + - **restart_systemd_on_update**: Enable/Disable auto restart of systemd units + - **dns_multitenant**: Enable/Disable bench multitenancy on running bench update + - **serve_default_site**: Configure nginx to serve the default site on port 80 + - **http_timeout**: Set HTTP timeout + + + +## Install commands + +The install group commands are used for manipulating system level dependencies. The usage for these commands is as + +```zsh + bench install COMMAND [ARGS]... +``` + + - **prerequisites**: Installs pre-requisite libraries, essential tools like b2zip, htop, screen, vim, x11-fonts, python libs, cups and Redis + - **nodejs**: Installs Node.js v8 + - **nginx**: Installs NGINX. If user is specified, sudoers is setup for that user + - **packer**: Installs Oracle virtualbox and packer 1.2.1 + - **psutil**: Installs psutil via pip + - **mariadb**: Install and setup MariaDB of specified version and root password + - **wkhtmltopdf**: Installs wkhtmltopdf v0.12.3 for linux + - **supervisor**: Installs supervisor. 
If user is specified, sudoers is setup for that user + - **fail2ban**: Install fail2ban, an intrusion prevention software framework that protects computer servers from brute-force attacks + - **virtualbox**: Installs supervisor diff --git a/docs/branch_details.md b/docs/branch_details.md new file mode 100644 index 0000000..094fc42 --- /dev/null +++ b/docs/branch_details.md @@ -0,0 +1,13 @@ +### XhiveERP/Xhiveframework Branching + +#### Branch Description + - `develop` Branch: All new feature developments will go in develop branch + - `staging` Branch: This branch serves as a release candidate. Before a week, release team will pull the feature from develop branch to staging branch. + EG: if the feature is in 25 July's milestone then it should go in staging on 19th July. + - `master` Branch: Community release. + - `hotfix` Branch: mainly define for support issues. This will include bugs or any high priority task like security patches. + +#### Where to send PR? + - If you are working on a new feature, then PR should point to develop branch + - If you are working on support issue / bug / error report, then PR should point to hotfix brach + - While performing testing on Staging branch, if any fix needed then only send that fix PR to staging. diff --git a/docs/commands_and_usage.md b/docs/commands_and_usage.md new file mode 100644 index 0000000..822aeb2 --- /dev/null +++ b/docs/commands_and_usage.md @@ -0,0 +1,74 @@ +## Usage + +* Updating + +To update the bench CLI tool, depending on your method of installation, you may use + + pip3 install -U xhiveframework-bench + + +To backup, update all apps and sites on your bench, you may use + + bench update + + +To manually update the bench, run `bench update` to update all the apps, run +patches, build JS and CSS files and restart supervisor (if configured to). + +You can also run the parts of the bench selectively. 
+ +`bench update --pull` will only pull changes in the apps + +`bench update --patch` will only run database migrations in the apps + +`bench update --build` will only build JS and CSS files for the bench + +`bench update --bench` will only update the bench utility (this project) + +`bench update --requirements` will only update all dependencies (Python + Node) for the apps available in current bench + + +* Create a new bench + + The init command will create a bench directory with xhiveframework framework installed. It will be setup for periodic backups and auto updates once a day. + + bench init xhiveframework-bench && cd xhiveframework-bench + +* Add a site + + Xhiveframework apps are run by xhiveframework sites and you will have to create at least one site. The new-site command allows you to do that. + + bench new-site site1.local + +* Add apps + + The get-app command gets remote xhiveframework apps from a remote git repository and installs them. Example: [xhiveerp](https://lab.membtech.comxhiveframework/xhiveerp) + + bench get-app xhiveerp https://lab.membtech.comxhiveframework/xhiveerp + +* Install apps + + To install an app on your new site, use the bench `install-app` command. + + bench --site site1.local install-app xhiveerp + +* Start bench + + To start using the bench, use the `bench start` command + + bench start + + To login to Xhiveframework / XhiveERP, open your browser and go to `[your-external-ip]:8000`, probably `localhost:8000` + + The default username is "Administrator" and password is what you set when you created the new site. + +* Setup Manager + +## What it does + + bench setup manager + +1. Create new site bench-manager.local +2. Gets the `bench_manager` app from https://lab.membtech.com/xhiveframework/bench_manager if it doesn't exist already +3. 
Installs the bench_manager app on the site bench-manager.local + diff --git a/docs/contribution_guidelines.md b/docs/contribution_guidelines.md new file mode 100644 index 0000000..b937f44 --- /dev/null +++ b/docs/contribution_guidelines.md @@ -0,0 +1,46 @@ +# Contribution Guidelines + +### Introduction (for first timers) + +Thank you for your interest in contributing to our project! Our world works on people taking initiative to contribute to the "commons" and contributing to open source means you are contributing to make things better for not only yourself, but everyone else too! So kudos to you for taking this initiative. + +Great projects depend on good code quality and adhering to certain standards while making sure the goals of the project are met. New features should follow the same pattern and so that users don't have to learn things again and again. + +Developers who maintain open source also expect that you follow certain guidelines. These guidelines ensure that developers are able quickly give feedback on your contribution and how to make it better. Most probably you might have to go back and change a few things, but it will be in th interest of making this process better for everyone. So do be prepared for some back and forth. + +Happy contributing! + +### Feedback Policy + +We will strive for a "Zero Pull Request Pending" policy, inspired by "Zero Inbox". This means, that if the pull request is good, it will be merged within a day and if it does not meet the requirements, it will be closed. + +### Design Guides + +Please read the following design guidelines carefully when contributing: + +1. [Form Design Guidelines](https://lab.membtech.comxhiveframework/xhiveerp/wiki/Form-Design-Guidelines) +1. [How to break large contributions into smaller ones](https://lab.membtech.comxhiveframework/xhiveerp/wiki/Cascading-Pull-Requests) + +### Pull Request Requirements + +1. 
**Test Cases:** Important to add test cases, even if its a very simple one that just calls the function. For UI, till we don't have Selenium testing setup, we need to see a screenshot / animated GIF. +1. **UX:** If your change involves user experience, add a screenshot / narration / animated GIF. +1. **Documentation:** Test Case must involve updating necessary documentation +1. **Explanation:** Include explanation if there is a design change, explain the use case and why this suggested change is better. If you are including a new library or replacing one, please give sufficient reference of why the suggested library is better. +1. **Demo:** Remember to update the demo script so that data related your feature is included in the demo. +1. **Failing Tests:** This is simple, you must make sure all automated tests are passing. +1. **Very Large Contribution:** It is very hard to accept and merge very large contributions, because there are too many lines of code to check and its implications can be large and unexpected. They way to contribute big features is to build them part by part. We can understand there are exceptions, but in most cases try and keep your pull-request to **30 lines of code** excluding tests and config files. **Use [Cascading Pull Requests](https://lab.membtech.comxhiveframework/xhiveerp/wiki/Cascading-Pull-Requests)** for large features. +1. **Incomplete Contributions must be hidden:** If the contribution is WIP or incomplete - which will most likely be the case, you can send small PRs as long as the user is not exposed to unfinished functionality. This will ensure that your code does not have build or other collateral issues. But these features must remain completely hidden to the user. +1. **Incorrect Patches:** If your design involves schema change and you must include patches that update the data as per your new schema. +1. 
**Incorrect Naming:** The naming of variables, models, fields etc must be consistent as per the existing design and semantics used in the system. +1. **Translated Strings:** All user facing strings / text must be wrapped in the `__("")` function in javascript and `_("")` function in Python, so that it is shown as translated to the user. +1. **Deprecated API:** The API used in the pull request must be the latest recommended methods and usage of globals like `cur_frm` must be avoided. +1. **Whitespace and indentation:** The XhiveERP and Xhiveframework Project uses tabs (I know and we are sorry, but its too much effort to change it now and we don't want to lose the history). The indentation must be consistent whether you are writing Javascript or Python. Multi-line strings or expressions must also be consistently indented, not hanging like a bee hive at the end of the line. We just think the code looks a lot more stable that way. + +#### What if my Pull Request is closed? + +Don't worry, fix the problem and re-open it! + +#### Why do we follow this policy? + +This is because XhiveERP is at a stage where it is being used by thousands of companies and introducing breaking changes can be harmful for everyone. Also we do not want to stop the speed of contributions and the best way to encourage contributors is to give fast feedback. diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 0000000..3ce3fb1 --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,35 @@ +### Requirements + +You will need a computer/server. Options include: + +- A Normal Computer/VPS/Baremetal Server: This is strongly recommended. Xhiveframework/XhiveERP installs properly and works well on these +- A Raspberry Pi, SAN Appliance, Network Router, Gaming Console, etc.: Although you may be able to install Xhiveframework/XhiveERP on specialized hardware, it is unlikely to work well and will be difficult for us to support. 
Strongly consider using a normal computer/VPS/baremetal server instead. **We do not support specialized hardware**. +- A Toaster, Car, Firearm, Thermostat, etc.: Yes, many modern devices now have embedded computing capability. We live in interesting times. However, you should not install Xhiveframework/XhiveERP on these devices. Instead, install it on a normal computer/VPS/baremetal server. **We do not support installing on noncomputing devices**. + +To install the Xhiveframework/XhiveERP server software, you will need an operating system on your normal computer which is not Windows. Note that the command line interface does work on Windows, and you can use Xhiveframework/XhiveERP from any operating system with a web browser. However, the server software does not run on Windows. It does run on other operating systems, so choose one of these instead: + +- Linux: Ubuntu, Debian, CentOS are the preferred distros and are tested. [Arch Linux](https://lab.membtech.com/xhiveframework/bench_new/wiki/Install-XhiveERP-on-ArchLinux) can also be used +- Mac OS X + +### Manual Install + +To manually install xhiveframework/xhiveerp, you can follow this [this wiki](https://lab.membtech.com/xhiveframework/xhiveframework15/wiki/The-Hitchhiker%27s-Guide-to-Installing-Xhiveframework-on-Linux) for Linux and [this wiki](https://lab.membtech.com/xhiveframework/xhiveframework15/wiki/The-Hitchhiker's-Guide-to-Installing-Xhiveframework-on-Mac-OS-X) for MacOS. It gives an excellent explanation about the stack. You can also follow the steps mentioned below: + +#### 1. Install Prerequisites +
+• Python 3.6+
+• Node.js 12
+• Redis 5					(caching and realtime updates)
+• MariaDB 10.3 / Postgres 9.5			(to run database driven apps)
+• yarn 1.12+					(js dependency manager)
+• pip 15+					(py dependency manager)
+• cron 						(scheduled jobs)
+• wkhtmltopdf (version 0.12.5 with patched qt) 	(for pdf generation)
+• Nginx 					(for production)
+
+ +#### 2. Install Bench + +Install the latest bench using pip + + pip3 install xhiveframework-bench diff --git a/docs/release_policy.md b/docs/release_policy.md new file mode 100644 index 0000000..5f6d75b --- /dev/null +++ b/docs/release_policy.md @@ -0,0 +1,63 @@ +# Release Policy + +#### Definitions: + - `develop` Branch: All new feature developments will go in develop branch + - `staging` Branch: This branch serves as a release candidate. Before a week, release team will pull the feature from develop branch to staging branch. + EG: if the feature is in 25 July's milestone then it should go in staging on 19th July. + - `master` Branch: `master` branch serves as a stable branch. This will use as production deployment. + - `hotfix` Branch: mainly define for support issues. This will include bugs or any high priority task like security patches. + +#### Create release from staging +- On Tuesday, we will release from staging to master. + +- Versioning: Given a version number MAJOR.MINOR.PATCH, increment the: + - MAJOR version when you make incompatible API changes, + - MINOR version when you add functionality in a backwards-compatible manner, and + - PATCH version when you make backwards-compatible bug fixes. + +- Impact on branches: + - merge staging branch to master + - push merge commit back to staging branch + - push merge commit to develop branch + - push merge commit to hotfix branch + +- Use release command to create release, +``` usage: bench release APP patch|minor|major --from-branch staging ``` + +--- + +#### Create staging branch + +- On Wednesday morning, `develop` will be merge into `staging`. `staging` branch is a release candidate. All new features will first go from `develop` to `staging` and then `staging` to `master`. + +- Use the prepare-staging command to create staging branch +```usage: bench prepare-staging APP``` + +- Impact on branches? 
+ - merge all commits from develop branch to staging + - push merge commit back to develop + +- QA will use staging for testing. + +- Deploy staging branch on xhiveframework.io, xhiveerp.org, xhiveframework.xhiveerp.com. + +- Only regression and security fixes can be cherry-picked into staging + +- Create a discuss post on what all new features or fixes going in next version. + +--- + +#### Create release from hotfix +- Depending on priority, hotfix release will take place. + +- Versioning: + - PATCH version when you make backwards-compatible bug fixes. + +- Impact on branches: + - merge hotfix branch to master + - push merge commit back to staging branch + - push merge commit to develop branch + - push merge commit to staging branch + +- Use release command to create release, +``` usage: bench release APP patch --from-branch hotfix ``` diff --git a/docs/releasing_xhiveframework_apps.md b/docs/releasing_xhiveframework_apps.md new file mode 100644 index 0000000..be62589 --- /dev/null +++ b/docs/releasing_xhiveframework_apps.md @@ -0,0 +1,41 @@ +# Releasing Xhiveframework XhiveERP + +* Make a new bench dedicated for releasing +``` +bench init release-bench --xhiveframework-path git@github.com:xhiveframework/xhiveframework15.git +``` + +* Get XhiveERP in the release bench +``` +bench get-app xhiveerp git@github.com:xhiveframework/xhiveerp15.git +``` + +* Configure as release bench. 
Add this to the common_site_config.json +``` +"release_bench": true, +``` + +* Add branches to update in common_site_config.json +``` +"branches_to_update": { + "staging": ["develop", "hotfix"], + "hotfix": ["develop", "staging"] +} +``` + +* Use the release commands to release +``` +Usage: bench release [OPTIONS] APP BUMP_TYPE +``` + +* Arguments : + * _APP_ App name e.g [xhiveframework|xhiveerp|yourapp] + * _BUMP_TYPE_ [major|minor|patch|stable|prerelease] +* Options: + * --from-branch git develop branch, default is develop + * --to-branch git master branch, default is master + * --remote git remote, default is upstream + * --owner git owner, default is xhiveframework + * --repo-name git repo name if different from app name + +* When updating major version, update `develop_version` in hooks.py, e.g. `9.x.x-develop` diff --git a/easy-install.py b/easy-install.py new file mode 100755 index 0000000..412f4c2 --- /dev/null +++ b/easy-install.py @@ -0,0 +1,356 @@ +#!/usr/bin/env python3 + +import argparse +import fileinput +import logging +import os +import platform +import subprocess +import sys +import time +import urllib.request +from shutil import move, unpack_archive, which +from typing import Dict + +logging.basicConfig( + filename="easy-install.log", + filemode="w", + format="%(asctime)s - %(levelname)s - %(message)s", + level=logging.INFO, +) + + +def cprint(*args, level: int = 1): + """ + logs colorful messages + level = 1 : RED + level = 2 : GREEN + level = 3 : YELLOW + + default level = 1 + """ + CRED = "\033[31m" + CGRN = "\33[92m" + CYLW = "\33[93m" + reset = "\033[0m" + message = " ".join(map(str, args)) + if level == 1: + print(CRED, message, reset) + if level == 2: + print(CGRN, message, reset) + if level == 3: + print(CYLW, message, reset) + + +def clone_xhiveframework_docker_repo() -> None: + try: + urllib.request.urlretrieve( + "https://lab.membtech.com/xhiveframework/xhiveframework_docker/archive/refs/heads/main.zip", + "xhiveframework_docker.zip", 
+ ) + logging.info("Downloaded xhiveframework_docker zip file from GitHub") + unpack_archive( + "xhiveframework_docker.zip", "." + ) # Unzipping the xhiveframework_docker.zip creates a folder "xhiveframework_docker-main" + move("xhiveframework_docker-main", "xhiveframework_docker") + logging.info("Unzipped and Renamed xhiveframework_docker") + os.remove("xhiveframework_docker.zip") + logging.info("Removed the downloaded zip file") + except Exception as e: + logging.error("Download and unzip failed", exc_info=True) + cprint("\nCloning xhiveframework_docker Failed\n\n", "[ERROR]: ", e, level=1) + + +def get_from_env(dir, file) -> Dict: + env_vars = {} + with open(os.path.join(dir, file)) as f: + for line in f: + if line.startswith("#") or not line.strip(): + continue + key, value = line.strip().split("=", 1) + env_vars[key] = value + return env_vars + + +def write_to_env( + wd: str, + sites, + db_pass: str, + admin_pass: str, + email: str, + xhiveerp_version: str = None, +) -> None: + quoted_sites = ",".join([f"`{site}`" for site in sites]).strip(",") + example_env = get_from_env(wd, "example.env") + xhiveerp_version = xhiveerp_version or example_env["XHIVEERP_VERSION"] + with open(os.path.join(wd, ".env"), "w") as f: + f.writelines( + [ + f"XHIVEERP_VERSION={xhiveerp_version}\n", # defaults to latest version of XhiveERP + f"DB_PASSWORD={db_pass}\n", + "DB_HOST=db\n", + "DB_PORT=3306\n", + "REDIS_CACHE=redis-cache:6379\n", + "REDIS_QUEUE=redis-queue:6379\n", + "REDIS_SOCKETIO=redis-socketio:6379\n", + f"LETSENCRYPT_EMAIL={email}\n", + f"SITE_ADMIN_PASS={admin_pass}\n", + f"SITES={quoted_sites}\n", + ] + ) + + +def generate_pass(length: int = 12) -> str: + """Generate random hash using best available randomness source.""" + import math + import secrets + + if not length: + length = 56 + + return secrets.token_hex(math.ceil(length / 2))[:length] + + +def check_repo_exists() -> bool: + return os.path.exists(os.path.join(os.getcwd(), "xhiveframework_docker")) + + +def 
setup_prod(project: str, sites, email: str, version: str = None, image = None) -> None: + if len(sites) == 0: + sites = ["site1.localhost"] + + if check_repo_exists(): + compose_file_name = os.path.join(os.path.expanduser("~"), f"{project}-compose.yml") + docker_repo_path = os.path.join(os.getcwd(), "xhiveframework_docker") + cprint( + "\nPlease refer to .example.env file in the xhiveframework_docker folder to know which keys to set\n\n", + level=3, + ) + admin_pass = "" + db_pass = "" + with open(compose_file_name, "w") as f: + # Writing to compose file + if not os.path.exists(os.path.join(docker_repo_path, ".env")): + admin_pass = generate_pass() + db_pass = generate_pass(9) + write_to_env(docker_repo_path, sites, db_pass, admin_pass, email, version) + cprint( + "\nA .env file is generated with basic configs. Please edit it to fit to your needs \n", + level=3, + ) + with open(os.path.join(os.path.expanduser("~"), "passwords.txt"), "w") as en: + en.writelines(f"ADMINISTRATOR_PASSWORD={admin_pass}\n") + en.writelines(f"MARIADB_ROOT_PASSWORD={db_pass}\n") + else: + env = get_from_env(docker_repo_path, ".env") + admin_pass = env["SITE_ADMIN_PASS"] + db_pass = env["DB_PASSWORD"] + try: + # TODO: Include flags for non-https and non-xhiveerp installation + subprocess.run( + [ + which("docker"), + "compose", + "--project-name", + project, + "-f", + "compose.yaml", + "-f", + "overrides/compose.mariadb.yaml", + "-f", + "overrides/compose.redis.yaml", + # "-f", "overrides/compose.noproxy.yaml", TODO: Add support for local proxying without HTTPs + "-f", + "overrides/compose.https.yaml", + "--env-file", + ".env", + "config", + ], + cwd=docker_repo_path, + stdout=f, + check=True, + ) + + except Exception: + logging.error("Docker Compose generation failed", exc_info=True) + cprint("\nGenerating Compose File failed\n") + sys.exit(1) + + # Use custom image + if image: + for line in fileinput.input(compose_file_name, inplace=True): + if "image: xhiveframework/xhiveerp" in line: + 
line = line.replace("image: xhiveframework/xhiveerp", f"image: {image}") + sys.stdout.write(line) + + try: + # Starting with generated compose file + subprocess.run( + [ + which("docker"), + "compose", + "-p", + project, + "-f", + compose_file_name, + "up", + "-d", + ], + check=True, + ) + logging.info(f"Docker Compose file generated at ~/{project}-compose.yml") + + except Exception as e: + logging.error("Prod docker-compose failed", exc_info=True) + cprint(" Docker Compose failed, please check the container logs\n", e) + sys.exit(1) + + for sitename in sites: + create_site(sitename, project, db_pass, admin_pass) + + else: + install_docker() + clone_xhiveframework_docker_repo() + setup_prod(project, sites, email, version, image) # Recursive + + +def setup_dev_instance(project: str): + if check_repo_exists(): + try: + subprocess.run( + [ + "docker", + "compose", + "-f", + "devcontainer-example/docker-compose.yml", + "--project-name", + project, + "up", + "-d", + ], + cwd=os.path.join(os.getcwd(), "xhiveframework_docker"), + check=True, + ) + cprint( + "Please go through the Development Documentation: https://lab.membtech.com/xhiveframework/xhiveframework_docker/tree/main/development to fully complete the setup.", + level=2, + ) + logging.info("Development Setup completed") + except Exception as e: + logging.error("Dev Environment setup failed", exc_info=True) + cprint("Setting Up Development Environment Failed\n", e) + else: + install_docker() + clone_xhiveframework_docker_repo() + setup_dev_instance(project) # Recursion on goes brrrr + + +def install_docker(): + if which("docker") is not None: + return + cprint("Docker is not installed, Installing Docker...", level=3) + logging.info("Docker not found, installing Docker") + if platform.system() == "Darwin" or platform.system() == "Windows": + print( + f""" + This script doesn't install Docker on {"Mac" if platform.system()=="Darwin" else "Windows"}. 
+ + Please go through the Docker Installation docs for your system and run this script again""" + ) + logging.debug("Docker setup failed due to platform is not Linux") + sys.exit(1) + try: + ps = subprocess.run( + ["curl", "-fsSL", "https://get.docker.com"], + capture_output=True, + check=True, + ) + subprocess.run(["/bin/bash"], input=ps.stdout, capture_output=True) + subprocess.run( + ["sudo", "usermod", "-aG", "docker", str(os.getenv("USER"))], check=True + ) + cprint("Waiting Docker to start", level=3) + time.sleep(10) + subprocess.run(["sudo", "systemctl", "restart", "docker.service"], check=True) + except Exception as e: + logging.error("Installing Docker failed", exc_info=True) + cprint("Failed to Install Docker\n", e) + cprint("\n Try Installing Docker Manually and re-run this script again\n") + sys.exit(1) + + +def create_site( + sitename: str, + project: str, + db_pass: str, + admin_pass: str, +): + cprint(f"\nCreating site: {sitename} \n", level=3) + + try: + subprocess.run( + [ + which("docker"), + "compose", + "-p", + project, + "exec", + "backend", + "bench", + "new-site", + sitename, + "--no-mariadb-socket", + "--db-root-password", + db_pass, + "--admin-password", + admin_pass, + "--install-app", + "xhiveerp", + "--set-default", + ], + check=True, + ) + logging.info("New site creation completed") + except Exception as e: + logging.error(f"Bench site creation failed for {sitename}", exc_info=True) + cprint(f"Bench Site creation failed for {sitename}\n", e) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Install Xhiveframework with Docker") + parser.add_argument( + "-p", "--prod", help="Setup Production System", action="store_true" + ) + parser.add_argument( + "-d", "--dev", help="Setup Development System", action="store_true" + ) + parser.add_argument( + "-s", + "--sitename", + help="Site Name(s) for your production bench", + default=[], + action="append", + dest="sites", + ) + parser.add_argument("-n", "--project", 
help="Project Name", default="xhiveframework") + parser.add_argument("-i", "--image", help="Full Image Name") + parser.add_argument( + "--email", help="Add email for the SSL.", required="--prod" in sys.argv + ) + parser.add_argument( + "-v", "--version", help="XhiveERP version to install, defaults to latest stable" + ) + args = parser.parse_args() + if args.dev: + cprint("\nSetting Up Development Instance\n", level=2) + logging.info("Running Development Setup") + setup_dev_instance(args.project) + elif args.prod: + cprint("\nSetting Up Production Instance\n", level=2) + logging.info("Running Production Setup") + if "example.com" in args.email: + cprint("Emails with example.com not acceptable", level=1) + sys.exit(1) + setup_prod(args.project, args.sites, args.email, args.version, args.image) + else: + parser.print_help() diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..64a8b6c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,62 @@ +[project] +name = "xhiveframework-bench" +description = "CLI to manage Multi-tenant deployments for Xhiveframework apps" +readme = "README.md" +license = "GPL-3.0-only" +requires-python = ">=3.7" +authors = [ + { name = "Xhiveframework Technologies Pvt Ltd", email = "developers@xhiveframework.io" }, +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Environment :: Console", + "License :: OSI Approved :: GNU General Public License v3 (GPLv3)", + "Natural Language :: English", + "Operating System :: MacOS", + "Operating System :: OS Independent", + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: User Interfaces", + "Topic :: System :: Installation/Setup", +] +dependencies = [ + "Click>=7.0", + "GitPython~=3.1.30", + "honcho", + "Jinja2~=3.1.3", + "python-crontab~=2.6.0", + "requests", + "semantic-version~=2.8.2", + "setuptools>40.9.0", + "tomli;python_version<'3.11'", +] +dynamic = [ + "version", +] + +[project.scripts] +bench = "bench.cli:cli" + 
+[project.urls] +Changelog = "https://lab.membtech.com/xhiveframework/bench_new.git" +Documentation = "https://lab.membtech.com/xhiveframework/bench_new.git" +Homepage = "https://lab.membtech.com/xhiveframework/bench_new.git" +Source = "https://lab.membtech.com/xhiveframework/bench_new.git" + +[build-system] +requires = [ + "hatchling>=1.6.0,<=1.21.0", +] +build-backend = "hatchling.build" + +[tool.hatch.version] +path = "bench/__init__.py" + +[tool.hatch.build.targets.sdist] +include = [ + "/bench" +] + +[tool.hatch.build.targets.wheel] +include = [ + "/bench" +]