diff --git a/lib/ansible/modules/extras/CONTRIBUTING.md b/lib/ansible/modules/extras/CONTRIBUTING.md new file mode 100644 index 00000000000..e441a4e3527 --- /dev/null +++ b/lib/ansible/modules/extras/CONTRIBUTING.md @@ -0,0 +1,28 @@ +Welcome To Ansible GitHub +========================= + +Hi! Nice to see you here! + +If you'd like to ask a question +=============================== + +Please see [this web page](http://docs.ansible.com/community.html) for community information, which includes pointers on how to ask questions on the [mailing lists](http://docs.ansible.com/community.html#mailing-list-information) and IRC. + +The GitHub issue tracker is not the best place for questions; IRC and the mailing lists are much better suited to them, and the community page above has pointers to both. + +If you'd like to contribute code +================================ + +Please see [this web page](http://docs.ansible.com/community.html) for information about the contribution process. Important license agreement information is also included on that page. + +If you'd like to file a bug +=========================== + +Please also read the community page above, and in particular make sure you copy [this issue template](https://github.com/ansible/ansible/blob/devel/ISSUE_TEMPLATE.md) into your ticket description. We have a friendly neighborhood bot that will remind you if you forget :) This template helps us organize tickets faster and saves everyone some repeated questions, so it's very helpful to us and we appreciate your help with it. + +Also, please make sure you are testing on the latest released version of Ansible or the development branch. + +Thanks! + + + diff --git a/lib/ansible/modules/extras/COPYING b/lib/ansible/modules/extras/COPYING new file mode 100644 index 00000000000..10926e87f11 --- /dev/null +++ b/lib/ansible/modules/extras/COPYING @@ -0,0 +1,675 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others.
+ + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. 
You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. 
+ + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. 
In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. + diff --git a/lib/ansible/modules/extras/README.md b/lib/ansible/modules/extras/README.md new file mode 100644 index 00000000000..0cdb6d9fc0d --- /dev/null +++ b/lib/ansible/modules/extras/README.md @@ -0,0 +1,15 @@ +ansible-modules-extras +====================== + +This repo is transitional and not yet enabled. Do not send issues or pull requests here yet. + +The goal is to split library/ from ansible into git submodules. + +See details: https://groups.google.com/forum/#!topic/ansible-devel/W2FYdXZ7zlM + +Once complete, new modules must be submitted to extras first, and modules will be manually promoted from extras into core. Tickets should be opened in the appropriate repo (ansible/ansible, ansible/ansible-modules-core, or ansible/ansible-modules-extras). + +License +======= + +As with Ansible itself, the modules distributed with Ansible are GPLv3 licensed. User-generated modules that are not part of this project can be of any license.
diff --git a/lib/ansible/modules/extras/cloud/ovirt b/lib/ansible/modules/extras/cloud/ovirt new file mode 100755 index 00000000000..fb84e918001 --- /dev/null +++ b/lib/ansible/modules/extras/cloud/ovirt @@ -0,0 +1,425 @@ +#!/usr/bin/python + +# (c) 2013, Vincent Van der Kussen +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ovirt +author: Vincent Van der Kussen +short_description: oVirt/RHEV platform management +description: + - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform +version_added: "1.4" +options: + user: + description: + - the user to authenticate with + default: null + required: true + aliases: [] + url: + description: + - the url of the oVirt instance + default: null + required: true + aliases: [] + instance_name: + description: + - the name of the instance to use + default: null + required: true + aliases: [ vmname ] + password: + description: + - password of the user to authenticate with + default: null + required: true + aliases: [] + image: + description: + - template to use for the instance + default: null + required: false + aliases: [] + resource_type: + description: + - whether you want to deploy an image or create an instance from scratch. 
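+ - 'template' clones the instance from the template given in the image option; 'new' builds it from scratch using the disk, cpu, memory and network options below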
+ default: null + required: false + aliases: [] + choices: [ 'new', 'template' ] + zone: + description: + - deploy the image to this oVirt cluster + default: null + required: false + aliases: [] + instance_disksize: + description: + - size of the instance's disk in GB + default: null + required: false + aliases: [ vm_disksize] + instance_cpus: + description: + - the instance's number of cpu's + default: 1 + required: false + aliases: [ vmcpus ] + instance_nic: + description: + - name of the network interface in oVirt/RHEV + default: null + required: false + aliases: [ vmnic ] + instance_network: + description: + - the logical network the machine should belong to + default: rhevm + required: false + aliases: [ vmnetwork ] + instance_mem: + description: + - the instance's amount of memory in MB + default: null + required: false + aliases: [ vmmem ] + instance_type: + description: + - define if the instance is a server or desktop + default: server + required: false + aliases: [ vmtype ] + choices: [ 'server', 'desktop' ] + disk_alloc: + description: + - define if disk is thin or preallocated + default: thin + required: false + aliases: [] + choices: [ 'thin', 'preallocated' ] + disk_int: + description: + - interface type of the disk + default: virtio + required: false + aliases: [] + choices: [ 'virtio', 'ide' ] + instance_os: + description: + - type of Operating System + default: null + required: false + aliases: [ vmos ] + instance_cores: + description: + - define the instance's number of cores + default: 1 + required: false + aliases: [ vmcores ] + sdomain: + description: + - the Storage Domain where you want to create the instance's disk on. + default: null + required: false + aliases: [] + region: + description: + - the oVirt/RHEV datacenter where you want to deploy to + default: null + required: false + aliases: [] + state: + description: + - create, terminate or remove instances + default: 'present' + required: false + aliases: [] + choices: ['present', 'absent', 'shutdown', 'started', 'restarted'] + +requirements: [ "ovirt-engine-sdk" ] +''' +EXAMPLES = ''' +# Basic example provisioning from image. 
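+# This clones the template given by image= onto the cluster given by zone=.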
+ +action: ovirt > + user=admin@internal + url=https://ovirt.example.com + instance_name=ansiblevm04 + password=secret + image=centos_64 + zone=cluster01 + resource_type=template + +# Full example to create new instance from scratch +action: ovirt > + instance_name=testansible + resource_type=new + instance_type=server + user=admin@internal + password=secret + url=https://ovirt.example.com + instance_disksize=10 + zone=cluster01 + region=datacenter1 + instance_cpus=1 + instance_nic=nic1 + instance_network=rhevm + instance_mem=1000 + disk_alloc=thin + sdomain=FIBER01 + instance_cores=1 + instance_os=rhel_6x64 + disk_int=virtio + +# stopping an instance +action: ovirt > + instance_name=testansible + state=shutdown + user=admin@internal + password=secret + url=https://ovirt.example.com + +# starting an instance +action: ovirt > + instance_name=testansible + state=started + user=admin@internal + password=secret + url=https://ovirt.example.com + + +''' + +import sys +import time + +try: + from ovirtsdk.api import API + from ovirtsdk.xml import params +except ImportError: + print "failed=True msg='ovirtsdk required for this module'" + sys.exit(1) + +# ------------------------------------------------------------------- # +# create connection with API +# +def conn(url, user, password): + api = API(url=url, username=user, password=password, insecure=True) + try: + value = api.test() + except: + print "error connecting to the oVirt API" + sys.exit(1) + return api + +# ------------------------------------------------------------------- # +# Create VM from scratch +def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int): + if vmdisk_alloc == 'thin': + # define VM params + vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype) + # define disk params + vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow', + storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) + # define network parameters + network_net = params.Network(name=vmnetwork) + nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio') + elif vmdisk_alloc == 'preallocated': + # define VM params + vmparams = params.VM(name=vmname,cluster=conn.clusters.get(name=zone),os=params.OperatingSystem(type_=vmos),template=conn.templates.get(name="Blank"),memory=1024 * 1024 * int(vmmem),cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))) ,type_=vmtype) + # define disk params + vmdisk= params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw', + storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)])) + # define network parameters + network_net = params.Network(name=vmnetwork) + nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio') + + try: + conn.vms.add(vmparams) + except: + print "Error creating VM with specified parameters" + sys.exit(1) + vm = conn.vms.get(name=vmname) + try: + vm.disks.add(vmdisk) + except: + print "Error attaching disk" + try: + vm.nics.add(nic_net1) + except: + print "Error adding nic" + + +# create an instance from a template +def create_vm_template(conn, vmname,
image, zone): + vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image),disks=params.Disks(clone=True)) + try: + conn.vms.add(vmparams) + except: + print 'error adding template %s' % image + sys.exit(1) + + +# start instance +def vm_start(conn, vmname): + vm = conn.vms.get(name=vmname) + vm.start() + +# Stop instance +def vm_stop(conn, vmname): + vm = conn.vms.get(name=vmname) + vm.stop() + +# restart instance +def vm_restart(conn, vmname): + state = vm_status(conn, vmname) + vm = conn.vms.get(name=vmname) + vm.stop() + while conn.vms.get(vmname).get_status().get_state() != 'down': + time.sleep(5) + vm.start() + +# remove an instance +def vm_remove(conn, vmname): + vm = conn.vms.get(name=vmname) + vm.delete() + +# ------------------------------------------------------------------- # +# VM statuses +# +# Get the VMs status +def vm_status(conn, vmname): + status = conn.vms.get(name=vmname).status.state + print "vm status is : %s" % status + return status + + +# Get VM object and return it's name if object exists +def get_vm(conn, vmname): + vm = conn.vms.get(name=vmname) + if vm == None: + name = "empty" + print "vmname: %s" % name + else: + name = vm.get_name() + print "vmname: %s" % name + return name + +# ------------------------------------------------------------------- # +# Hypervisor operations +# +# not available yet +# ------------------------------------------------------------------- # +# Main + +def main(): + + module = AnsibleModule( + argument_spec = dict( + state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']), + #name = dict(required=True), + user = dict(required=True), + url = dict(required=True), + instance_name = dict(required=True, aliases=['vmname']), + password = dict(required=True), + image = dict(), + resource_type = dict(choices=['new', 'template']), + zone = dict(), + instance_disksize = dict(aliases=['vm_disksize']), + instance_cpus = dict(default=1, aliases=['vmcpus']), + instance_nic = dict(aliases=['vmnic']), + instance_network = dict(default='rhevm', aliases=['vmnetwork']), + instance_mem = dict(aliases=['vmmem']), + instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']), + disk_alloc = dict(default='thin', choices=['thin', 'preallocated']), + disk_int = dict(default='virtio', choices=['virtio', 'ide']), + instance_os = dict(aliases=['vmos']), + instance_cores = dict(default=1, aliases=['vmcores']), + sdomain = dict(), + region = dict(), + ) + ) + + state = module.params['state'] + user = module.params['user'] + url = module.params['url'] + vmname = module.params['instance_name'] + password = module.params['password'] + image = module.params['image'] # name of the image to deploy + resource_type = module.params['resource_type'] # template or from scratch + zone = module.params['zone'] # oVirt cluster + vmdisk_size = module.params['instance_disksize'] # disksize + vmcpus = module.params['instance_cpus'] # number of cpu + vmnic = module.params['instance_nic'] # network interface + vmnetwork = module.params['instance_network'] # logical network + vmmem = module.params['instance_mem'] # mem size + vmdisk_alloc = module.params['disk_alloc'] # thin, preallocated + vmdisk_int = module.params['disk_int'] # disk interface virtio or ide + vmos = module.params['instance_os'] # Operating System + vmtype = module.params['instance_type'] # server or desktop + vmcores = module.params['instance_cores'] # number of cores + sdomain = 
module.params['sdomain'] # storage domain to store disk on + region = module.params['region'] # oVirt Datacenter + #initialize connection + c = conn(url+"/api", user, password) + + if state == 'present': + if get_vm(c, vmname) == "empty": + if resource_type == 'template': + create_vm_template(c, vmname, image, zone) + module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname,image)) + elif resource_type == 'new': + # FIXME: refactor, use keyword args. + create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int) + module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname) + else: + module.exit_json(changed=False, msg="You did not specify a resource type") + else: + module.exit_json(changed=False, msg="VM %s already exists" % vmname) + + if state == 'started': + if vm_status(c, vmname) == 'up': + module.exit_json(changed=False, msg="VM %s is already running" % vmname) + else: + vm_start(c, vmname) + module.exit_json(changed=True, msg="VM %s started" % vmname) + + if state == 'shutdown': + if vm_status(c, vmname) == 'down': + module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname) + else: + vm_stop(c, vmname) + module.exit_json(changed=True, msg="VM %s is shutting down" % vmname) + + if state == 'restart': + if vm_status(c, vmname) == 'up': + vm_restart(c, vmname) + module.exit_json(changed=True, msg="VM %s is restarted" % vmname) + else: + module.exit_json(changed=False, msg="VM %s is not running" % vmname) + + if state == 'absent': + if get_vm(c, vmname) == "empty": + module.exit_json(changed=False, msg="VM %s does not exist" % vmname) + else: + vm_remove(c, vmname) + module.exit_json(changed=True, msg="VM %s removed" % vmname) + + + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/cloud/virt b/lib/ansible/modules/extras/cloud/virt new file mode 100644 index 00000000000..f1d36fc1964 --- /dev/null +++ b/lib/ansible/modules/extras/cloud/virt @@ -0,0 +1,493 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Virt management features + +Copyright 2007, 2012 Red Hat, Inc +Michael DeHaan +Seth Vidal + +This software may be freely redistributed under the terms of the GNU +general public license. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: virt +short_description: Manages virtual machines supported by libvirt +description: + - Manages virtual machines supported by I(libvirt). +version_added: "0.2" +options: + name: + description: + - name of the guest VM being managed. Note that VM must be previously + defined with xml. + required: true + default: null + aliases: [] + state: + description: + - Note that there may be some lag for state requests like C(shutdown) + since these refer only to VM states. After starting a guest, it may not + be immediately accessible. + required: false + choices: [ "running", "shutdown", "destroyed", "paused" ] + default: "no" + command: + description: + - in addition to state management, various non-idempotent commands are available. 
See examples. + required: false + choices: ["create","status", "start", "stop", "pause", "unpause", + "shutdown", "undefine", "destroy", "get_xml", "autostart", + "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"] + uri: + description: + - libvirt connection uri + required: false + default: qemu:///system + xml: + description: + - XML document used with the define command + required: false + default: null +requirements: [ "libvirt" ] +author: Michael DeHaan, Seth Vidal +''' + +EXAMPLES = ''' +# a playbook task line: +- virt: name=alpha state=running + +# /usr/bin/ansible invocations +ansible host -m virt -a "name=alpha command=status" +ansible host -m virt -a "name=alpha command=get_xml" +ansible host -m virt -a "name=alpha command=create uri=lxc:///" + +# a playbook example of defining and launching an LXC guest +tasks: + - name: define vm + virt: name=foo + command=define + xml="{{ lookup('template', 'container-template.xml.j2') }}" + uri=lxc:/// + - name: start vm + virt: name=foo state=running uri=lxc:/// +''' + +VIRT_FAILED = 1 +VIRT_SUCCESS = 0 +VIRT_UNAVAILABLE=2 + +import sys + +try: + import libvirt +except ImportError: + print "failed=True msg='libvirt python module unavailable'" + sys.exit(1) + +ALL_COMMANDS = [] +VM_COMMANDS = ['create','status', 'start', 'stop', 'pause', 'unpause', + 'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define'] +HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype'] +ALL_COMMANDS.extend(VM_COMMANDS) +ALL_COMMANDS.extend(HOST_COMMANDS) + +VIRT_STATE_NAME_MAP = { + 0 : "running", + 1 : "running", + 2 : "running", + 3 : "paused", + 4 : "shutdown", + 5 : "shutdown", + 6 : "crashed" +} + +class VMNotFound(Exception): + pass + +class LibvirtConnection(object): + + def __init__(self, uri, module): + + self.module = module + + cmd = "uname -r" + rc, stdout, stderr = self.module.run_command(cmd) + + if "xen" in stdout: + conn = libvirt.open(None) + else: + conn = libvirt.open(uri) + + if not conn: + raise Exception("hypervisor connection failure") + + self.conn = conn + + def find_vm(self, vmid): + """ + Extra bonus feature: vmid = -1 returns a list of everything + """ + conn = self.conn + + vms = [] + + # this block of code borrowed from virt-manager: + # get working domain's name + ids = conn.listDomainsID() + for id in ids: + vm = conn.lookupByID(id) + vms.append(vm) + # get defined domain + names = conn.listDefinedDomains() + for name in names: + vm = conn.lookupByName(name) + vms.append(vm) + + if vmid == -1: + return vms + + for vm in vms: + if vm.name() == vmid: + return vm + + raise VMNotFound("virtual machine %s not found" % vmid) + + def shutdown(self, vmid): + return self.find_vm(vmid).shutdown() + + def pause(self, vmid): + return self.suspend(vmid) + + def unpause(self, vmid): + return self.resume(vmid) + + def suspend(self, vmid): + return self.find_vm(vmid).suspend() + + def resume(self, vmid): + return self.find_vm(vmid).resume() + + def create(self, vmid): + return self.find_vm(vmid).create() + + def destroy(self, vmid): + return self.find_vm(vmid).destroy() + + def undefine(self, vmid): + return self.find_vm(vmid).undefine() + + def get_status2(self, vm): + state = vm.info()[0] + return VIRT_STATE_NAME_MAP.get(state,"unknown") + + def get_status(self, vmid): + state = self.find_vm(vmid).info()[0] + return VIRT_STATE_NAME_MAP.get(state,"unknown") + + def nodeinfo(self): + return self.conn.getInfo() + + def get_type(self): + return self.conn.getType() + + def get_xml(self, vmid):
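+ # look up the domain by name and return its XML definition via libvirt's XMLDesc()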
+ vm = self.conn.lookupByName(vmid) + return vm.XMLDesc(0) + + def get_maxVcpus(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.maxVcpus() + + def get_maxMemory(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.maxMemory() + + def getFreeMemory(self): + return self.conn.getFreeMemory() + + def get_autostart(self, vmid): + vm = self.conn.lookupByName(vmid) + return vm.autostart() + + def set_autostart(self, vmid, val): + vm = self.conn.lookupByName(vmid) + return vm.setAutostart(val) + + def define_from_xml(self, xml): + return self.conn.defineXML(xml) + + +class Virt(object): + + def __init__(self, uri, module): + self.module = module + self.uri = uri + + def __get_conn(self): + self.conn = LibvirtConnection(self.uri, self.module) + return self.conn + + def get_vm(self, vmid): + self.__get_conn() + return self.conn.find_vm(vmid) + + def state(self): + vms = self.list_vms() + state = [] + for vm in vms: + state_blurb = self.conn.get_status(vm) + state.append("%s %s" % (vm,state_blurb)) + return state + + def info(self): + vms = self.list_vms() + info = dict() + for vm in vms: + data = self.conn.find_vm(vm).info() + # libvirt returns maxMem, memory, and cpuTime as long()'s, which + # xmlrpclib tries to convert to regular int's during serialization. + # This throws exceptions, so convert them to strings here and + # assume the other end of the xmlrpc connection can figure things + # out or doesn't care. + info[vm] = { + "state" : VIRT_STATE_NAME_MAP.get(data[0],"unknown"), + "maxMem" : str(data[1]), + "memory" : str(data[2]), + "nrVirtCpu" : data[3], + "cpuTime" : str(data[4]), + } + info[vm]["autostart"] = self.conn.get_autostart(vm) + + return info + + def nodeinfo(self): + self.__get_conn() + info = dict() + data = self.conn.nodeinfo() + info = { + "cpumodel" : str(data[0]), + "phymemory" : str(data[1]), + "cpus" : str(data[2]), + "cpumhz" : str(data[3]), + "numanodes" : str(data[4]), + "sockets" : str(data[5]), + "cpucores" : str(data[6]), + "cputhreads" : str(data[7]) + } + return info + + def list_vms(self, state=None): + self.conn = self.__get_conn() + vms = self.conn.find_vm(-1) + results = [] + for x in vms: + try: + if state: + vmstate = self.conn.get_status2(x) + if vmstate == state: + results.append(x.name()) + else: + results.append(x.name()) + except: + pass + return results + + def virttype(self): + return self.__get_conn().get_type() + + def autostart(self, vmid): + self.conn = self.__get_conn() + return self.conn.set_autostart(vmid, True) + + def freemem(self): + self.conn = self.__get_conn() + return self.conn.getFreeMemory() + + def shutdown(self, vmid): + """ Make the machine with the given vmid stop running. Whatever that takes. """ + self.__get_conn() + self.conn.shutdown(vmid) + return 0 + + + def pause(self, vmid): + """ Pause the machine with the given vmid. """ + + self.__get_conn() + return self.conn.suspend(vmid) + + def unpause(self, vmid): + """ Unpause the machine with the given vmid. """ + + self.__get_conn() + return self.conn.resume(vmid) + + def create(self, vmid): + """ Start the machine via the given vmid """ + + self.__get_conn() + return self.conn.create(vmid) + + def start(self, vmid): + """ Start the machine via the given id/name """ + + self.__get_conn() + return self.conn.create(vmid) + + def destroy(self, vmid): + """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
+ self.__get_conn() + return self.conn.destroy(vmid) + + def undefine(self, vmid): + """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """ + + self.__get_conn() + return self.conn.undefine(vmid) + + def status(self, vmid): + """ + Return a state suitable for server consumption. Aka, codes.py values, not XM output. + """ + self.__get_conn() + return self.conn.get_status(vmid) + + def get_xml(self, vmid): + """ + Receive a VM name/id as input + Return an XML description of the VM, as returned by libvirt + """ + + self.__get_conn() + return self.conn.get_xml(vmid) + + def get_maxVcpus(self, vmid): + """ + Gets the max number of VCPUs on a guest + """ + + self.__get_conn() + return self.conn.get_maxVcpus(vmid) + + def get_max_memory(self, vmid): + """ + Gets the max memory on a guest + """ + + self.__get_conn() + return self.conn.get_maxMemory(vmid) + + def define(self, xml): + """ + Define a guest with the given xml + """ + self.__get_conn() + return self.conn.define_from_xml(xml) + +def core(module): + + state = module.params.get('state', None) + guest = module.params.get('name', None) + command = module.params.get('command', None) + uri = module.params.get('uri', None) + xml = module.params.get('xml', None) + + v = Virt(uri, module) + res = {} + + if state and command=='list_vms': + res = v.list_vms(state=state) + if type(res) != dict: + res = { command: res } + return VIRT_SUCCESS, res + + if state: + if not guest: + module.fail_json(msg = "state change requires a guest specified") + + res['changed'] = False + if state == 'running': + if v.status(guest) == 'paused': + res['changed'] = True + res['msg'] = v.unpause(guest) + elif v.status(guest) != 'running': + res['changed'] = True + res['msg'] = v.start(guest) + elif state == 'shutdown': + if v.status(guest) != 'shutdown': + res['changed'] = True + res['msg'] = v.shutdown(guest) + elif state == 'destroyed': + if v.status(guest) != 'shutdown': + res['changed'] = True + res['msg'] = v.destroy(guest) + elif state == 'paused': + if v.status(guest) == 'running': + res['changed'] = True + res['msg'] = v.pause(guest) + else: + module.fail_json(msg="unexpected state") + + return VIRT_SUCCESS, res + + if command: + if command in VM_COMMANDS: + if not guest: + module.fail_json(msg = "%s requires 1 argument: guest" % command) + if command == 'define': + if not xml: + module.fail_json(msg = "define requires xml argument") + try: + v.get_vm(guest) + except VMNotFound: + v.define(xml) + res = {'changed': True, 'created': guest} + return VIRT_SUCCESS, res + res = getattr(v, command)(guest) + if type(res) != dict: + res = { command: res } + return VIRT_SUCCESS, res + + elif hasattr(v, command): + res = getattr(v, command)() + if type(res) != dict: + res = { command: res } + return VIRT_SUCCESS, res + + else: + module.fail_json(msg="Command %s not recognized" % command) + + module.fail_json(msg="expected state or command parameter to be specified") + +def main(): + + module = AnsibleModule(argument_spec=dict( + name = dict(aliases=['guest']), + state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']), + command = dict(choices=ALL_COMMANDS), + uri = dict(default='qemu:///system'), + xml = dict(), + )) + + rc = VIRT_SUCCESS + try: + rc, result = core(module) + except Exception, e: + module.fail_json(msg=str(e)) + + if rc != 0: # something went wrong, emit the msg + module.fail_json(rc=rc, msg=result) + else: + module.exit_json(**result) + + +# import module snippets +from
ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/database/mongodb_user b/lib/ansible/modules/extras/database/mongodb_user new file mode 100644 index 00000000000..5d7e0897b68 --- /dev/null +++ b/lib/ansible/modules/extras/database/mongodb_user @@ -0,0 +1,242 @@ +#!/usr/bin/python + +# (c) 2012, Elliott Foster +# Sponsored by Four Kitchens http://fourkitchens.com. +# (c) 2014, Epic Games, Inc. +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: mongodb_user +short_description: Adds or removes a user from a MongoDB database. +description: + - Adds or removes a user from a MongoDB database. +version_added: "1.1" +options: + login_user: + description: + - The username used to authenticate with + required: false + default: null + login_password: + description: + - The password used to authenticate with + required: false + default: null + login_host: + description: + - The host running the database + required: false + default: localhost + login_port: + description: + - The port to connect to + required: false + default: 27017 + replica_set: + version_added: "1.6" + description: + - Replica set to connect to (automatically connects to primary for writes) + required: false + default: null + database: + description: + - The name of the database to add/remove the user from + required: true + user: + description: + - The name of the user to add or remove + required: true + default: null + password: + description: + - The password to use for the user + required: false + default: null + roles: + version_added: "1.3" + description: + - "The database user roles valid values are one or more of the following: read, 'readWrite', 'dbAdmin', 'userAdmin', 'clusterAdmin', 'readAnyDatabase', 'readWriteAnyDatabase', 'userAdminAnyDatabase', 'dbAdminAnyDatabase'" + - This param requires mongodb 2.4+ and pymongo 2.5+ + required: false + default: "readWrite" + state: + state: + description: + - The database user state + required: false + default: present + choices: [ "present", "absent" ] +notes: + - Requires the pymongo Python package on the remote host, version 2.4.2+. This + can be installed using pip or the OS package manager. @see http://api.mongodb.org/python/current/installation.html +requirements: [ "pymongo" ] +author: Elliott Foster +''' + +EXAMPLES = ''' +# Create 'burgers' database user with name 'bob' and password '12345'. +- mongodb_user: database=burgers name=bob password=12345 state=present + +# Delete 'burgers' database user with name 'bob'. 
+- mongodb_user: database=burgers name=bob state=absent + +# Define more users with various specific roles (if not defined, no roles is assigned, and the user will be added via pre mongo 2.2 style) +- mongodb_user: database=burgers name=ben password=12345 roles='read' state=present +- mongodb_user: database=burgers name=jim password=12345 roles='readWrite,dbAdmin,userAdmin' state=present +- mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present + +# add a user to database in a replica set, the primary server is automatically discovered and written to +- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present +''' + +import ConfigParser +from distutils.version import LooseVersion +try: + from pymongo.errors import ConnectionFailure + from pymongo.errors import OperationFailure + from pymongo import version as PyMongoVersion + from pymongo import MongoClient +except ImportError: + try: # for older PyMongo 2.2 + from pymongo import Connection as MongoClient + except ImportError: + pymongo_found = False + else: + pymongo_found = True +else: + pymongo_found = True + +# ========================================= +# MongoDB module specific support methods. +# + +def user_add(module, client, db_name, user, password, roles): + db = client[db_name] + if roles is None: + db.add_user(user, password, False) + else: + try: + db.add_user(user, password, None, roles=roles) + except OperationFailure, e: + err_msg = str(e) + if LooseVersion(PyMongoVersion) <= LooseVersion('2.5'): + err_msg = err_msg + ' (Note: you must be on mongodb 2.4+ and pymongo 2.5+ to use the roles param)' + module.fail_json(msg=err_msg) + +def user_remove(client, db_name, user): + db = client[db_name] + db.remove_user(user) + +def load_mongocnf(): + config = ConfigParser.RawConfigParser() + mongocnf = os.path.expanduser('~/.mongodb.cnf') + + try: + config.readfp(open(mongocnf)) + creds = dict( + user=config.get('client', 'user'), + password=config.get('client', 'pass') + ) + except (ConfigParser.NoOptionError, IOError): + return False + + return creds + +# ========================================= +# Module execution. 
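+#
+# For reference, load_mongocnf() above falls back to credentials stored in
+# ~/.mongodb.cnf when neither login_user nor login_password is supplied. A
+# minimal file it can parse looks like this (placeholder values):
+#
+#   [client]
+#   user = admin
+#   pass = secret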
+# + +def main(): + module = AnsibleModule( + argument_spec = dict( + login_user=dict(default=None), + login_password=dict(default=None), + login_host=dict(default='localhost'), + login_port=dict(default='27017'), + replica_set=dict(default=None), + database=dict(required=True, aliases=['db']), + user=dict(required=True, aliases=['name']), + password=dict(aliases=['pass']), + roles=dict(default=None, type='list'), + state=dict(default='present', choices=['absent', 'present']), + ) + ) + + if not pymongo_found: + module.fail_json(msg='the python pymongo module is required') + + login_user = module.params['login_user'] + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = module.params['login_port'] + replica_set = module.params['replica_set'] + db_name = module.params['database'] + user = module.params['user'] + password = module.params['password'] + roles = module.params['roles'] + state = module.params['state'] + + try: + if replica_set: + client = MongoClient(login_host, int(login_port), replicaset=replica_set) + else: + client = MongoClient(login_host, int(login_port)) + + # try to authenticate as a target user to check if it already exists + try: + client[db_name].authenticate(user, password) + if state == 'present': + module.exit_json(changed=False, user=user) + except OperationFailure: + if state == 'absent': + module.exit_json(changed=False, user=user) + + if login_user is None and login_password is None: + mongocnf_creds = load_mongocnf() + if mongocnf_creds is not False: + login_user = mongocnf_creds['user'] + login_password = mongocnf_creds['password'] + elif login_password is None and login_user is not None: + module.fail_json(msg='when supplying login arguments, both login_user and login_password must be provided') + + if login_user is not None and login_password is not None: + client.admin.authenticate(login_user, login_password) + + except ConnectionFailure, e: + module.fail_json(msg='unable to connect to database: %s' % str(e)) + + if state == 'present': + if password is None: + module.fail_json(msg='password parameter required when adding a user') + + try: + user_add(module, client, db_name, user, password, roles) + except OperationFailure, e: + module.fail_json(msg='Unable to add or update user: %s' % str(e)) + + elif state == 'absent': + try: + user_remove(client, db_name, user) + except OperationFailure, e: + module.fail_json(msg='Unable to remove user: %s' % str(e)) + + module.exit_json(changed=True, user=user) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/database/mysql_replication b/lib/ansible/modules/extras/database/mysql_replication new file mode 100644 index 00000000000..d965f3ce0d4 --- /dev/null +++ b/lib/ansible/modules/extras/database/mysql_replication @@ -0,0 +1,369 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" + +Ansible module to manage mysql replication +(c) 2013, Balazs Pocze +Certain parts are taken from Mark Theunissen's mysqldb module + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +GNU General Public License for more details. +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: mysql_replication + +short_description: Manage MySQL replication +description: + - Manages MySQL server replication, slave, master status get and change master host. +version_added: "1.3" +options: + mode: + description: + - module operating mode. Could be getslave (SHOW SLAVE STATUS), getmaster (SHOW MASTER STATUS), changemaster (CHANGE MASTER TO), startslave (START SLAVE), stopslave (STOP SLAVE) + required: False + choices: + - getslave + - getmaster + - changemaster + - stopslave + - startslave + default: getslave + login_user: + description: + - username to connect mysql host, if defined login_password also needed. + required: False + login_password: + description: + - password to connect mysql host, if defined login_user also needed. + required: False + login_host: + description: + - mysql host to connect + required: False + login_unix_socket: + description: + - unix socket to connect mysql server + master_host: + description: + - same as mysql variable + master_user: + description: + - same as mysql variable + master_password: + description: + - same as mysql variable + master_port: + description: + - same as mysql variable + master_connect_retry: + description: + - same as mysql variable + master_log_file: + description: + - same as mysql variable + master_log_pos: + description: + - same as mysql variable + relay_log_file: + description: + - same as mysql variable + relay_log_pos: + description: + - same as mysql variable + master_ssl: + description: + - same as mysql variable + possible values: 0,1 + master_ssl_ca: + description: + - same as mysql variable + master_ssl_capath: + description: + - same as mysql variable + master_ssl_cert: + description: + - same as mysql variable + master_ssl_key: + description: + - same as mysql variable + master_ssl_cipher: + description: + - same as mysql variable + +''' + +EXAMPLES = ''' +# Stop mysql slave thread +- mysql_replication: mode=stopslave + +# Get master binlog file name and binlog position +- mysql_replication: mode=getmaster + +# Change master to master server 192.168.1.1 and use binary log 'mysql-bin.000009' with position 4578 +- mysql_replication: mode=changemaster master_host=192.168.1.1 master_log_file=mysql-bin.000009 master_log_pos=4578 +''' + +import ConfigParser +import os +import warnings + +try: + import MySQLdb +except ImportError: + mysqldb_found = False +else: + mysqldb_found = True + + +def get_master_status(cursor): + cursor.execute("SHOW MASTER STATUS") + masterstatus = cursor.fetchone() + return masterstatus + + +def get_slave_status(cursor): + cursor.execute("SHOW SLAVE STATUS") + slavestatus = cursor.fetchone() + return slavestatus + + +def stop_slave(cursor): + try: + cursor.execute("STOP SLAVE") + stopped = True + except: + stopped = False + return stopped + + +def start_slave(cursor): + try: + cursor.execute("START SLAVE") + started = True + except: + started = False + return started + + +def changemaster(cursor, chm): + SQLPARAM = ",".join(chm) + cursor.execute("CHANGE MASTER TO " + SQLPARAM) + + +def strip_quotes(s): + """ Remove surrounding single or double quotes + + >>> print strip_quotes('hello') + hello + >>> print strip_quotes('"hello"') + hello + >>> print strip_quotes("'hello'") + hello + >>> print strip_quotes("'hello") + 'hello + + """ + single_quote = "'" + double_quote = '"' + + if 
s.startswith(single_quote) and s.endswith(single_quote): + s = s.strip(single_quote) + elif s.startswith(double_quote) and s.endswith(double_quote): + s = s.strip(double_quote) + return s + + +def config_get(config, section, option): + """ Calls ConfigParser.get and strips quotes + + See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html + """ + return strip_quotes(config.get(section, option)) + + +def load_mycnf(): + config = ConfigParser.RawConfigParser() + mycnf = os.path.expanduser('~/.my.cnf') + if not os.path.exists(mycnf): + return False + try: + config.readfp(open(mycnf)) + except (IOError): + return False + # We support two forms of passwords in .my.cnf, both pass= and password=, + # as these are both supported by MySQL. + try: + passwd = config_get(config, 'client', 'password') + except (ConfigParser.NoOptionError): + try: + passwd = config_get(config, 'client', 'pass') + except (ConfigParser.NoOptionError): + return False + + # If .my.cnf doesn't specify a user, default to user login name + try: + user = config_get(config, 'client', 'user') + except (ConfigParser.NoOptionError): + user = getpass.getuser() + creds = dict(user=user, passwd=passwd) + return creds + + +def main(): + module = AnsibleModule( + argument_spec = dict( + login_user=dict(default=None), + login_password=dict(default=None), + login_host=dict(default="localhost"), + login_unix_socket=dict(default=None), + mode=dict(default="getslave", choices=["getmaster", "getslave", "changemaster", "stopslave", "startslave"]), + master_host=dict(default=None), + master_user=dict(default=None), + master_password=dict(default=None), + master_port=dict(default=None), + master_connect_retry=dict(default=None), + master_log_file=dict(default=None), + master_log_pos=dict(default=None), + relay_log_file=dict(default=None), + relay_log_pos=dict(default=None), + master_ssl=dict(default=False, type='bool'), + master_ssl_ca=dict(default=None), + master_ssl_capath=dict(default=None), + master_ssl_cert=dict(default=None), + master_ssl_key=dict(default=None), + master_ssl_cipher=dict(default=None), + ) + ) + user = module.params["login_user"] + password = module.params["login_password"] + host = module.params["login_host"] + mode = module.params["mode"] + master_host = module.params["master_host"] + master_user = module.params["master_user"] + master_password = module.params["master_password"] + master_port = module.params["master_port"] + master_connect_retry = module.params["master_connect_retry"] + master_log_file = module.params["master_log_file"] + master_log_pos = module.params["master_log_pos"] + relay_log_file = module.params["relay_log_file"] + relay_log_pos = module.params["relay_log_pos"] + master_ssl = module.params["master_ssl"] + master_ssl_ca = module.params["master_ssl_ca"] + master_ssl_capath = module.params["master_ssl_capath"] + master_ssl_cert = module.params["master_ssl_cert"] + master_ssl_key = module.params["master_ssl_key"] + master_ssl_cipher = module.params["master_ssl_cipher"] + + if not mysqldb_found: + module.fail_json(msg="the python mysqldb module is required") + else: + warnings.filterwarnings('error', category=MySQLdb.Warning) + + # Either the caller passes both a username and password with which to connect to + # mysql, or they pass neither and allow this module to read the credentials from + # ~/.my.cnf. 
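+    # For reference, a minimal ~/.my.cnf that load_mycnf() above can read looks
+    # like this (placeholder values; surrounding quotes are removed by
+    # strip_quotes()):
+    #
+    #   [client]
+    #   user = repl_admin
+    #   password = "secret"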
+ login_password = module.params["login_password"] + login_user = module.params["login_user"] + if login_user is None and login_password is None: + mycnf_creds = load_mycnf() + if mycnf_creds is False: + login_user = "root" + login_password = "" + else: + login_user = mycnf_creds["user"] + login_password = mycnf_creds["passwd"] + elif login_password is None or login_user is None: + module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided") + + try: + if module.params["login_unix_socket"]: + db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql") + else: + db_connection = MySQLdb.connect(host=module.params["login_host"], user=login_user, passwd=login_password, db="mysql") + except Exception, e: + module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials") + try: + cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor) + except Exception, e: + module.fail_json(msg="Trouble getting DictCursor from db_connection: %s" % e) + + if mode in "getmaster": + masterstatus = get_master_status(cursor) + try: + module.exit_json( **masterstatus ) + except TypeError: + module.fail_json(msg="Server is not configured as mysql master") + + elif mode in "getslave": + slavestatus = get_slave_status(cursor) + try: + module.exit_json( **slavestatus ) + except TypeError: + module.fail_json(msg="Server is not configured as mysql slave") + + elif mode in "changemaster": + print "Change master" + chm=[] + if master_host: + chm.append("MASTER_HOST='" + master_host + "'") + if master_user: + chm.append("MASTER_USER='" + master_user + "'") + if master_password: + chm.append("MASTER_PASSWORD='" + master_password + "'") + if master_port: + chm.append("MASTER_PORT=" + master_port) + if master_connect_retry: + chm.append("MASTER_CONNECT_RETRY='" + master_connect_retry + "'") + if master_log_file: + chm.append("MASTER_LOG_FILE='" + master_log_file + "'") + if master_log_pos: + chm.append("MASTER_LOG_POS=" + master_log_pos) + if relay_log_file: + chm.append("RELAY_LOG_FILE='" + relay_log_file + "'") + if relay_log_pos: + chm.append("RELAY_LOG_POS=" + relay_log_pos) + if master_ssl: + chm.append("MASTER_SSL=1") + if master_ssl_ca: + chm.append("MASTER_SSL_CA='" + master_ssl_ca + "'") + if master_ssl_capath: + chm.append("MASTER_SSL_CAPATH='" + master_ssl_capath + "'") + if master_ssl_cert: + chm.append("MASTER_SSL_CERT='" + master_ssl_cert + "'") + if master_ssl_key: + chm.append("MASTER_SSL_KEY='" + master_ssl_key + "'") + if master_ssl_cipher: + chm.append("MASTER_SSL_CIPHER='" + master_ssl_cipher + "'") + changemaster(cursor,chm) + module.exit_json(changed=True) + elif mode in "startslave": + started = start_slave(cursor) + if started is True: + module.exit_json(msg="Slave started ", changed=True) + else: + module.exit_json(msg="Slave already started (Or cannot be started)", changed=False) + elif mode in "stopslave": + stopped = stop_slave(cursor) + if stopped is True: + module.exit_json(msg="Slave stopped", changed=True) + else: + module.exit_json(msg="Slave already stopped", changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() +warnings.simplefilter("ignore") \ No newline at end of file diff --git a/lib/ansible/modules/extras/database/redis b/lib/ansible/modules/extras/database/redis new file mode 100644 index 00000000000..eb9654631e7 
--- /dev/null +++ b/lib/ansible/modules/extras/database/redis @@ -0,0 +1,329 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: redis +short_description: Various redis commands, slave and flush +description: + - Unified utility to interact with redis instances. + 'slave' sets a redis instance in slave or master mode. + 'flush' flushes all the instance or a specified db. + 'config' (new in 1.6), ensures a configuration setting on an instance. +version_added: "1.3" +options: + command: + description: + - The selected redis command + required: true + default: null + choices: [ "slave", "flush", "config" ] + login_password: + description: + - The password used to authenticate with (usually not used) + required: false + default: null + login_host: + description: + - The host running the database + required: false + default: localhost + login_port: + description: + - The port to connect to + required: false + default: 6379 + master_host: + description: + - The host of the master instance [slave command] + required: false + default: null + master_port: + description: + - The port of the master instance [slave command] + required: false + default: null + slave_mode: + description: + - the mode of the redis instance [slave command] + required: false + default: slave + choices: [ "master", "slave" ] + db: + description: + - The database to flush (used in db mode) [flush command] + required: false + default: null + flush_mode: + description: + - Type of flush (all the dbs in a redis instance or a specific one) + [flush command] + required: false + default: all + choices: [ "all", "db" ] + name: + version_added: 1.6 + description: + - A redis config key. + required: false + default: null + value: + version_added: 1.6 + description: + - A redis config value. + required: false + default: null + + +notes: + - Requires the redis-py Python package on the remote host. You can + install it with pip (pip install redis) or with a package manager. 
+ https://github.com/andymccurdy/redis-py + - If the redis master instance we are making slave of is password protected + this needs to be in the redis.conf in the masterauth variable + +requirements: [ redis ] +author: Xabier Larrakoetxea +''' + +EXAMPLES = ''' +# Set local redis instance to be slave of melee.island on port 6377 +- redis: command=slave master_host=melee.island master_port=6377 + +# Deactivate slave mode +- redis: command=slave slave_mode=master + +# Flush all the redis db +- redis: command=flush flush_mode=all + +# Flush only one db in a redis instance +- redis: command=flush db=1 flush_mode=db + +# Configure local redis to have 10000 max clients +- redis: command=config name=maxclients value=10000 + +# Configure local redis to have lua time limit of 100 ms +- redis: command=config name=lua-time-limit value=100 +''' + +try: + import redis +except ImportError: + redis_found = False +else: + redis_found = True + + +# =========================================== +# Redis module specific support methods. +# + +def set_slave_mode(client, master_host, master_port): + try: + return client.slaveof(master_host, master_port) + except Exception: + return False + + +def set_master_mode(client): + try: + return client.slaveof() + except Exception: + return False + + +def flush(client, db=None): + try: + if type(db) != int: + return client.flushall() + else: + # The passed client has been connected to the database already + return client.flushdb() + except Exception: + return False + + +# =========================================== +# Module execution. +# + +def main(): + module = AnsibleModule( + argument_spec = dict( + command=dict(default=None, choices=['slave', 'flush', 'config']), + login_password=dict(default=None), + login_host=dict(default='localhost'), + login_port=dict(default='6379'), + master_host=dict(default=None), + master_port=dict(default=None), + slave_mode=dict(default='slave', choices=['master', 'slave']), + db=dict(default=None), + flush_mode=dict(default='all', choices=['all', 'db']), + name=dict(default=None), + value=dict(default=None) + ), + supports_check_mode = True + ) + + if not redis_found: + module.fail_json(msg="python redis module is required") + + login_password = module.params['login_password'] + login_host = module.params['login_host'] + login_port = int(module.params['login_port']) + command = module.params['command'] + + # Slave Command section ----------- + if command == "slave": + master_host = module.params['master_host'] + master_port = module.params['master_port'] + try: + master_port = int(module.params['master_port']) + except Exception: + pass + mode = module.params['slave_mode'] + + #Check if we have all the data + if mode == "slave": # Only need data if we want to be slave + if not master_host: + module.fail_json( + msg='In slave mode master host must be provided') + + if not master_port: + module.fail_json( + msg='In slave mode master port must be provided') + + #Connect and check + r = redis.StrictRedis(host=login_host, + port=login_port, + password=login_password) + try: + r.ping() + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + + #Check if we are already in the mode that we want + info = r.info() + if mode == "master" and info["role"] == "master": + module.exit_json(changed=False, mode=mode) + + elif mode == "slave" and\ + info["role"] == "slave" and\ + info["master_host"] == master_host and\ + info["master_port"] == master_port: + status = { + 'status': mode, + 'master_host': master_host, + 
'master_port': master_port, + } + module.exit_json(changed=False, mode=status) + else: + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "slave": + if module.check_mode or\ + set_slave_mode(r, master_host, master_port): + info = r.info() + status = { + 'status': mode, + 'master_host': master_host, + 'master_port': master_port, + } + module.exit_json(changed=True, mode=status) + else: + module.fail_json(msg='Unable to set slave mode') + + else: + if module.check_mode or set_master_mode(r): + module.exit_json(changed=True, mode=mode) + else: + module.fail_json(msg='Unable to set master mode') + + # flush Command section ----------- + elif command == "flush": + try: + db = int(module.params['db']) + except Exception: + db = 0 + mode = module.params['flush_mode'] + + #Check if we have all the data + if mode == "db": + if type(db) != int: + module.fail_json( + msg="In db mode the db number must be provided") + + #Connect and check + r = redis.StrictRedis(host=login_host, + port=login_port, + password=login_password, + db=db) + try: + r.ping() + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + + # Do the stuff + # (Check Check_mode before commands so the commands aren't evaluated + # if not necessary) + if mode == "all": + if module.check_mode or flush(r): + module.exit_json(changed=True, flushed=True) + else: # Flush never fails :) + module.fail_json(msg="Unable to flush all databases") + + else: + if module.check_mode or flush(r, db): + module.exit_json(changed=True, flushed=True, db=db) + else: # Flush never fails :) + module.fail_json(msg="Unable to flush '%d' database" % db) + elif command == 'config': + name = module.params['name'] + value = module.params['value'] + + r = redis.StrictRedis(host=login_host, + port=login_port, + password=login_password) + + try: + r.ping() + except Exception, e: + module.fail_json(msg="unable to connect to database: %s" % e) + + + try: + old_value = r.config_get(name)[name] + except Exception, e: + module.fail_json(msg="unable to read config: %s" % e) + changed = old_value != value + + if module.check_mode or not changed: + module.exit_json(changed=changed, name=name, value=value) + else: + try: + r.config_set(name, value) + except Exception, e: + module.fail_json(msg="unable to write config: %s" % e) + module.exit_json(changed=changed, name=name, value=value) + else: + module.fail_json(msg='A valid command must be provided') + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/database/riak b/lib/ansible/modules/extras/database/riak new file mode 100644 index 00000000000..b30e7dc485d --- /dev/null +++ b/lib/ansible/modules/extras/database/riak @@ -0,0 +1,255 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, James Martin , Drew Kerrigan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# +DOCUMENTATION = ''' +--- +module: riak +short_description: This module handles some common Riak operations +description: + - This module can be used to join nodes to a cluster, check + the status of the cluster. +version_added: "1.2" +options: + command: + description: + - The command you would like to perform against the cluster. + required: false + default: null + aliases: [] + choices: ['ping', 'kv_test', 'join', 'plan', 'commit'] + config_dir: + description: + - The path to the riak configuration directory + required: false + default: /etc/riak + aliases: [] + http_conn: + description: + - The ip address and port that is listening for Riak HTTP queries + required: false + default: 127.0.0.1:8098 + aliases: [] + target_node: + description: + - The target node for certain operations (join, ping) + required: false + default: riak@127.0.0.1 + aliases: [] + wait_for_handoffs: + description: + - Number of seconds to wait for handoffs to complete. + required: false + default: null + aliases: [] + type: 'int' + wait_for_ring: + description: + - Number of seconds to wait for all nodes to agree on the ring. + required: false + default: null + aliases: [] + type: 'int' + wait_for_service: + description: + - Waits for a riak service to come online before continuing. + required: false + default: None + aliases: [] + choices: ['kv'] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 +''' + +EXAMPLES = ''' +# Join's a Riak node to another node +- riak: command=join target_node=riak@10.1.1.1 + +# Wait for handoffs to finish. Use with async and poll. +- riak: wait_for_handoffs=yes + +# Wait for riak_kv service to startup +- riak: wait_for_service=kv +''' + +import urllib2 +import time +import socket +import sys +try: + import json +except ImportError: + import simplejson as json + + +def ring_check(module, riak_admin_bin): + cmd = '%s ringready' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0 and 'TRUE All nodes agree on the ring' in out: + return True + else: + return False + +def main(): + + module = AnsibleModule( + argument_spec=dict( + command=dict(required=False, default=None, choices=[ + 'ping', 'kv_test', 'join', 'plan', 'commit']), + config_dir=dict(default='/etc/riak'), + http_conn=dict(required=False, default='127.0.0.1:8098'), + target_node=dict(default='riak@127.0.0.1', required=False), + wait_for_handoffs=dict(default=False, type='int'), + wait_for_ring=dict(default=False, type='int'), + wait_for_service=dict( + required=False, default=None, choices=['kv']), + validate_certs = dict(default='yes', type='bool')) + ) + + + command = module.params.get('command') + config_dir = module.params.get('config_dir') + http_conn = module.params.get('http_conn') + target_node = module.params.get('target_node') + wait_for_handoffs = module.params.get('wait_for_handoffs') + wait_for_ring = module.params.get('wait_for_ring') + wait_for_service = module.params.get('wait_for_service') + validate_certs = module.params.get('validate_certs') + + + #make sure riak commands are on the path + riak_bin = module.get_bin_path('riak') + riak_admin_bin = module.get_bin_path('riak-admin') + + timeout = time.time() + 120 + while True: + if time.time() > timeout: + module.fail_json(msg='Timeout, could not fetch Riak stats.') + (response, info) = fetch_url(module, 'http://%s/stats' % 
(http_conn), force=True, timeout=5) + if info['status'] == 200: + stats_raw = response.read() + break + time.sleep(5) + + # here we attempt to load those stats, + try: + stats = json.loads(stats_raw) + except: + module.fail_json(msg='Could not parse Riak stats.') + + node_name = stats['nodename'] + nodes = stats['ring_members'] + ring_size = stats['ring_creation_size'] + rc, out, err = module.run_command([riak_bin, 'version'] ) + version = out.strip() + + result = dict(node_name=node_name, + nodes=nodes, + ring_size=ring_size, + version=version) + + if command == 'ping': + cmd = '%s ping %s' % ( riak_bin, target_node ) + rc, out, err = module.run_command(cmd) + if rc == 0: + result['ping'] = out + else: + module.fail_json(msg=out) + + elif command == 'kv_test': + cmd = '%s test' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['kv_test'] = out + else: + module.fail_json(msg=out) + + elif command == 'join': + if nodes.count(node_name) == 1 and len(nodes) > 1: + result['join'] = 'Node is already in cluster or staged to be in cluster.' + else: + cmd = '%s cluster join %s' % (riak_admin_bin, target_node) + rc, out, err = module.run_command(cmd) + if rc == 0: + result['join'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'plan': + cmd = '%s cluster plan' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['plan'] = out + if 'Staged Changes' in out: + result['changed'] = True + else: + module.fail_json(msg=out) + + elif command == 'commit': + cmd = '%s cluster commit' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if rc == 0: + result['commit'] = out + result['changed'] = True + else: + module.fail_json(msg=out) + +# this could take a while, recommend to run in async mode + if wait_for_handoffs: + timeout = time.time() + wait_for_handoffs + while True: + cmd = '%s transfers' % riak_admin_bin + rc, out, err = module.run_command(cmd) + if 'No transfers active' in out: + result['handoffs'] = 'No transfers active.' + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for handoffs.') + + if wait_for_service: + cmd = [riak_admin_bin, 'wait_for_service', 'riak_%s' % wait_for_service, node_name ] + rc, out, err = module.run_command(cmd) + result['service'] = out + + if wait_for_ring: + timeout = time.time() + wait_for_ring + while True: + if ring_check(module, riak_admin_bin): + break + time.sleep(10) + if time.time() > timeout: + module.fail_json(msg='Timeout waiting for nodes to agree on ring.') + + result['ring_ready'] = ring_check(module, riak_admin_bin) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_parameter b/lib/ansible/modules/extras/messaging/rabbitmq_parameter new file mode 100644 index 00000000000..2f78bd4ee15 --- /dev/null +++ b/lib/ansible/modules/extras/messaging/rabbitmq_parameter @@ -0,0 +1,152 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Chatham Financial +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rabbitmq_parameter +short_description: Adds or removes parameters to RabbitMQ +description: + - Manage dynamic, cluster-wide parameters for RabbitMQ +version_added: "1.1" +author: Chris Hoffman +options: + component: + description: + - Name of the component of which the parameter is being set + required: true + default: null + name: + description: + - Name of the parameter being set + required: true + default: null + value: + description: + - Value of the parameter, as a JSON term + required: false + default: null + vhost: + description: + - vhost to apply access privileges. + required: false + default: / + node: + description: + - erlang node name of the rabbit we wish to configure + required: false + default: rabbit + version_added: "1.2" + state: + description: + - Specify if user is to be added or removed + required: false + default: present + choices: [ 'present', 'absent'] +''' + +EXAMPLES = """ +# Set the federation parameter 'local_username' to a value of 'guest' (in quotes) +- rabbitmq_parameter: component=federation + name=local-username + value='"guest"' + state=present +""" + +class RabbitMqParameter(object): + def __init__(self, module, component, name, value, vhost, node): + self.module = module + self.component = component + self.name = name + self.value = value + self.vhost = vhost + self.node = node + + self._value = None + + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get(self): + parameters = self._exec(['list_parameters', '-p', self.vhost], True) + + for param_item in parameters: + component, name, value = param_item.split('\t') + + if component == self.component and name == self.name: + self._value = value + return True + return False + + def set(self): + self._exec(['set_parameter', '-p', self.vhost, self.component, self.name, self.value]) + + def delete(self): + self._exec(['clear_parameter', '-p', self.vhost, self.component, self.name]) + + def has_modifications(self): + return self.value != self._value + +def main(): + arg_spec = dict( + component=dict(required=True), + name=dict(required=True), + value=dict(default=None), + vhost=dict(default='/'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit') + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + component = module.params['component'] + name = module.params['name'] + value = module.params['value'] + vhost = module.params['vhost'] + state = module.params['state'] + node = module.params['node'] + + rabbitmq_parameter = RabbitMqParameter(module, component, name, value, vhost, node) + + changed = False + if rabbitmq_parameter.get(): + if state == 'absent': + rabbitmq_parameter.delete() + changed = True + else: + if rabbitmq_parameter.has_modifications(): + rabbitmq_parameter.set() + changed = True + elif state == 'present': + 
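+        # The parameter does not exist on the vhost yet, so create it.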
rabbitmq_parameter.set() + changed = True + + module.exit_json(changed=changed, component=component, name=name, vhost=vhost, state=state) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_plugin b/lib/ansible/modules/extras/messaging/rabbitmq_plugin new file mode 100644 index 00000000000..53c38f978d5 --- /dev/null +++ b/lib/ansible/modules/extras/messaging/rabbitmq_plugin @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Chatham Financial +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rabbitmq_plugin +short_description: Adds or removes plugins to RabbitMQ +description: + - Enables or disables RabbitMQ plugins +version_added: "1.1" +author: Chris Hoffman +options: + names: + description: + - Comma-separated list of plugin names + required: true + default: null + aliases: [name] + new_only: + description: + - Only enable missing plugins + - Does not disable plugins that are not in the names list + required: false + default: "no" + choices: [ "yes", "no" ] + state: + description: + - Specify if plugins are to be enabled or disabled + required: false + default: enabled + choices: [enabled, disabled] + prefix: + description: + - Specify a custom install prefix to a Rabbit + required: false + version_added: "1.3" + default: null +''' + +EXAMPLES = ''' +# Enables the rabbitmq_management plugin +- rabbitmq_plugin: names=rabbitmq_management state=enabled +''' + +class RabbitMqPlugins(object): + def __init__(self, module): + self.module = module + + if module.params['prefix']: + self._rabbitmq_plugins = module.params['prefix'] + "/sbin/rabbitmq-plugins" + else: + self._rabbitmq_plugins = module.get_bin_path('rabbitmq-plugins', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmq_plugins] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get_all(self): + return self._exec(['list', '-E', '-m'], True) + + def enable(self, name): + self._exec(['enable', name]) + + def disable(self, name): + self._exec(['disable', name]) + +def main(): + arg_spec = dict( + names=dict(required=True, aliases=['name']), + new_only=dict(default='no', type='bool'), + state=dict(default='enabled', choices=['enabled', 'disabled']), + prefix=dict(required=False, default=None) + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + names = module.params['names'].split(',') + new_only = module.params['new_only'] + state = module.params['state'] + + rabbitmq_plugins = RabbitMqPlugins(module) + enabled_plugins = rabbitmq_plugins.get_all() + + enabled = [] + disabled = [] + if state == 'enabled': + if not new_only: + for plugin in enabled_plugins: + if plugin not in names: + 
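+                    # new_only is off, so any enabled plugin that is not in the
+                    # requested names list is disabled below.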
rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + for name in names: + if name not in enabled_plugins: + rabbitmq_plugins.enable(name) + enabled.append(name) + else: + for plugin in enabled_plugins: + if plugin in names: + rabbitmq_plugins.disable(plugin) + disabled.append(plugin) + + changed = len(enabled) > 0 or len(disabled) > 0 + module.exit_json(changed=changed, enabled=enabled, disabled=disabled) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_policy b/lib/ansible/modules/extras/messaging/rabbitmq_policy new file mode 100644 index 00000000000..800c3822d55 --- /dev/null +++ b/lib/ansible/modules/extras/messaging/rabbitmq_policy @@ -0,0 +1,156 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, John Dewey +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: rabbitmq_policy +short_description: Manage the state of policies in RabbitMQ. +description: + - Manage the state of a virtual host in RabbitMQ. +version_added: "1.5" +author: John Dewey +options: + name: + description: + - The name of the policy to manage. + required: true + default: null + vhost: + description: + - The name of the vhost to apply to. + required: false + default: / + pattern: + description: + - A regex of queues to apply the policy to. + required: true + default: null + tags: + description: + - A dict or string describing the policy. + required: true + default: null + priority: + description: + - The priority of the policy. + required: false + default: 0 + node: + description: + - Erlang node name of the rabbit we wish to configure. + required: false + default: rabbit + state: + description: + - The state of the policy. 
+ default: present + choices: [present, absent] +''' + +EXAMPLES = ''' +- name: ensure the default vhost contains the HA policy via a dict + rabbitmq_policy: name=HA pattern='.*' + args: + tags: + "ha-mode": all + +- name: ensure the default vhost contains the HA policy + rabbitmq_policy: name=HA pattern='.*' tags="ha-mode=all" +''' +class RabbitMqPolicy(object): + def __init__(self, module, name): + self._module = module + self._name = name + self._vhost = module.params['vhost'] + self._pattern = module.params['pattern'] + self._tags = module.params['tags'] + self._priority = module.params['priority'] + self._node = module.params['node'] + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self._module.check_mode or (self._module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self._node] + args.insert(1, '-p') + args.insert(2, self._vhost) + rc, out, err = self._module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def list(self): + policies = self._exec(['list_policies'], True) + + for policy in policies: + policy_name = policy.split('\t')[1] + if policy_name == self._name: + return True + return False + + def set(self): + import json + args = ['set_policy'] + args.append(self._name) + args.append(self._pattern) + args.append(json.dumps(self._tags)) + args.append('--priority') + args.append(self._priority) + return self._exec(args) + + def clear(self): + return self._exec(['clear_policy', self._name]) + + +def main(): + arg_spec = dict( + name=dict(required=True), + vhost=dict(default='/'), + pattern=dict(required=True), + tags=dict(type='dict', required=True), + priority=dict(default='0'), + node=dict(default='rabbit'), + state=dict(default='present', choices=['present', 'absent']), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + state = module.params['state'] + rabbitmq_policy = RabbitMqPolicy(module, name) + + changed = False + if rabbitmq_policy.list(): + if state == 'absent': + rabbitmq_policy.clear() + changed = True + else: + changed = False + elif state == 'present': + rabbitmq_policy.set() + changed = True + + module.exit_json(changed=changed, name=name, state=state) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_user b/lib/ansible/modules/extras/messaging/rabbitmq_user new file mode 100644 index 00000000000..1cbee360dff --- /dev/null +++ b/lib/ansible/modules/extras/messaging/rabbitmq_user @@ -0,0 +1,249 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Chatham Financial +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: rabbitmq_user +short_description: Adds or removes users to RabbitMQ +description: + - Add or remove users to RabbitMQ and assign permissions +version_added: "1.1" +author: Chris Hoffman +options: + user: + description: + - Name of user to add + required: true + default: null + aliases: [username, name] + password: + description: + - Password of user to add. + - To change the password of an existing user, you must also specify + C(force=yes). + required: false + default: null + tags: + description: + - User tags specified as comma delimited + required: false + default: null + vhost: + description: + - vhost to apply access privileges. + required: false + default: / + node: + description: + - erlang node name of the rabbit we wish to configure + required: false + default: rabbit + version_added: "1.2" + configure_priv: + description: + - Regular expression to restrict configure actions on a resource + for the specified vhost. + - By default all actions are restricted. + required: false + default: ^$ + write_priv: + description: + - Regular expression to restrict configure actions on a resource + for the specified vhost. + - By default all actions are restricted. + required: false + default: ^$ + read_priv: + description: + - Regular expression to restrict configure actions on a resource + for the specified vhost. + - By default all actions are restricted. + required: false + default: ^$ + force: + description: + - Deletes and recreates the user. + required: false + default: "no" + choices: [ "yes", "no" ] + state: + description: + - Specify if user is to be added or removed + required: false + default: present + choices: [present, absent] +''' + +EXAMPLES = ''' +# Add user to server and assign full access control +- rabbitmq_user: user=joe + password=changeme + vhost=/ + configure_priv=.* + read_priv=.* + write_priv=.* + state=present +''' + +class RabbitMqUser(object): + def __init__(self, module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node): + self.module = module + self.username = username + self.password = password + self.node = node + if tags is None: + self.tags = list() + else: + self.tags = tags.split(',') + + permissions = dict( + vhost=vhost, + configure_priv=configure_priv, + write_priv=write_priv, + read_priv=read_priv + ) + self.permissions = permissions + + self._tags = None + self._permissions = None + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get(self): + users = self._exec(['list_users'], True) + + for user_tag in users: + user, tags = user_tag.split('\t') + + if user == self.username: + for c in ['[',']',' ']: + tags = tags.replace(c, '') + + if tags != '': + self._tags = tags.split(',') + else: + self._tags = list() + + self._permissions = self._get_permissions() + return True + return False + + def _get_permissions(self): + perms_out = self._exec(['list_user_permissions', self.username], True) + + for perm in perms_out: + vhost, configure_priv, write_priv, read_priv = perm.split('\t') + if vhost == self.permissions['vhost']: + return dict(vhost=vhost, configure_priv=configure_priv, write_priv=write_priv, read_priv=read_priv) + + return dict() + + def add(self): + self._exec(['add_user', 
self.username, self.password]) + + def delete(self): + self._exec(['delete_user', self.username]) + + def set_tags(self): + self._exec(['set_user_tags', self.username] + self.tags) + + def set_permissions(self): + cmd = ['set_permissions'] + cmd.append('-p') + cmd.append(self.permissions['vhost']) + cmd.append(self.username) + cmd.append(self.permissions['configure_priv']) + cmd.append(self.permissions['write_priv']) + cmd.append(self.permissions['read_priv']) + self._exec(cmd) + + def has_tags_modifications(self): + return set(self.tags) != set(self._tags) + + def has_permissions_modifications(self): + return self._permissions != self.permissions + +def main(): + arg_spec = dict( + user=dict(required=True, aliases=['username', 'name']), + password=dict(default=None), + tags=dict(default=None), + vhost=dict(default='/'), + configure_priv=dict(default='^$'), + write_priv=dict(default='^$'), + read_priv=dict(default='^$'), + force=dict(default='no', type='bool'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit') + ) + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + username = module.params['user'] + password = module.params['password'] + tags = module.params['tags'] + vhost = module.params['vhost'] + configure_priv = module.params['configure_priv'] + write_priv = module.params['write_priv'] + read_priv = module.params['read_priv'] + force = module.params['force'] + state = module.params['state'] + node = module.params['node'] + + rabbitmq_user = RabbitMqUser(module, username, password, tags, vhost, configure_priv, write_priv, read_priv, node) + + changed = False + if rabbitmq_user.get(): + if state == 'absent': + rabbitmq_user.delete() + changed = True + else: + if force: + rabbitmq_user.delete() + rabbitmq_user.add() + rabbitmq_user.get() + changed = True + + if rabbitmq_user.has_tags_modifications(): + rabbitmq_user.set_tags() + changed = True + + if rabbitmq_user.has_permissions_modifications(): + rabbitmq_user.set_permissions() + changed = True + elif state == 'present': + rabbitmq_user.add() + rabbitmq_user.set_tags() + rabbitmq_user.set_permissions() + changed = True + + module.exit_json(changed=changed, user=username, state=state) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/messaging/rabbitmq_vhost b/lib/ansible/modules/extras/messaging/rabbitmq_vhost new file mode 100644 index 00000000000..fd4b04a683f --- /dev/null +++ b/lib/ansible/modules/extras/messaging/rabbitmq_vhost @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Chatham Financial +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+# + +DOCUMENTATION = ''' +--- +module: rabbitmq_vhost +short_description: Manage the state of a virtual host in RabbitMQ +description: + - Manage the state of a virtual host in RabbitMQ +version_added: "1.1" +author: Chris Hoffman +options: + name: + description: + - The name of the vhost to manage + required: true + default: null + aliases: [vhost] + node: + description: + - erlang node name of the rabbit we wish to configure + required: false + default: rabbit + version_added: "1.2" + tracing: + description: + - Enable/disable tracing for a vhost + default: "no" + choices: [ "yes", "no" ] + aliases: [trace] + state: + description: + - The state of vhost + default: present + choices: [present, absent] +''' + +EXAMPLES = ''' +# Ensure that the vhost /test exists. +- rabbitmq_vhost: name=/test state=present +''' + +class RabbitMqVhost(object): + def __init__(self, module, name, tracing, node): + self.module = module + self.name = name + self.tracing = tracing + self.node = node + + self._tracing = False + self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True) + + def _exec(self, args, run_in_check_mode=False): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = [self._rabbitmqctl, '-q', '-n', self.node] + rc, out, err = self.module.run_command(cmd + args, check_rc=True) + return out.splitlines() + return list() + + def get(self): + vhosts = self._exec(['list_vhosts', 'name', 'tracing'], True) + + for vhost in vhosts: + name, tracing = vhost.split('\t') + if name == self.name: + self._tracing = self.module.boolean(tracing) + return True + return False + + def add(self): + return self._exec(['add_vhost', self.name]) + + def delete(self): + return self._exec(['delete_vhost', self.name]) + + def set_tracing(self): + if self.tracing != self._tracing: + if self.tracing: + self._enable_tracing() + else: + self._disable_tracing() + return True + return False + + def _enable_tracing(self): + return self._exec(['trace_on', '-p', self.name]) + + def _disable_tracing(self): + return self._exec(['trace_off', '-p', self.name]) + + +def main(): + arg_spec = dict( + name=dict(required=True, aliases=['vhost']), + tracing=dict(default='off', aliases=['trace'], type='bool'), + state=dict(default='present', choices=['present', 'absent']), + node=dict(default='rabbit'), + ) + + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + tracing = module.params['tracing'] + state = module.params['state'] + node = module.params['node'] + + rabbitmq_vhost = RabbitMqVhost(module, name, tracing, node) + + changed = False + if rabbitmq_vhost.get(): + if state == 'absent': + rabbitmq_vhost.delete() + changed = True + else: + if rabbitmq_vhost.set_tracing(): + changed = True + elif state == 'present': + rabbitmq_vhost.add() + rabbitmq_vhost.set_tracing() + changed = True + + module.exit_json(changed=changed, name=name, state=state) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/monitoring/airbrake_deployment b/lib/ansible/modules/extras/monitoring/airbrake_deployment new file mode 100644 index 00000000000..e1c490b881b --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/airbrake_deployment @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Bruce Pennypacker +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public 
License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: airbrake_deployment +version_added: "1.2" +author: Bruce Pennypacker +short_description: Notify airbrake about app deployments +description: + - Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking) +options: + token: + description: + - API token. + required: true + environment: + description: + - The airbrake environment name, typically 'production', 'staging', etc. + required: true + user: + description: + - The username of the person doing the deployment + required: false + repo: + description: + - URL of the project repository + required: false + revision: + description: + - A hash, number, tag, or other identifier showing what revision was deployed + required: false + url: + description: + - Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit. + required: false + default: "https://airbrake.io/deploys" + version_added: "1.5" + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +''' + +EXAMPLES = ''' +- airbrake_deployment: token=AAAAAA + environment='staging' + user='ansible' + revision=4.2 +''' + +# =========================================== +# Module execution. 
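+# The request built in main() is a form-encoded POST sent with fetch_url().
+# Roughly illustrative payload (field names are taken from the code below,
+# values are placeholders only):
+#
+#   api_key=AAAAAA&deploy[rails_env]=staging&deploy[local_username]=ansible&deploy[scm_revision]=4.2
+#
+# In check mode the module exits early with changed=True and never contacts
+# the endpoint configured via the 'url' parameter.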
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + environment=dict(required=True), + user=dict(required=False), + repo=dict(required=False), + revision=dict(required=False), + url=dict(required=False, default='https://api.airbrake.io/deploys.txt'), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + # build list of params + params = {} + + if module.params["environment"]: + params["deploy[rails_env]"] = module.params["environment"] + + if module.params["user"]: + params["deploy[local_username]"] = module.params["user"] + + if module.params["repo"]: + params["deploy[scm_repository]"] = module.params["repo"] + + if module.params["revision"]: + params["deploy[scm_revision]"] = module.params["revision"] + + params["api_key"] = module.params["token"] + + url = module.params.get('url') + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to airbrake + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url)) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() + diff --git a/lib/ansible/modules/extras/monitoring/bigpanda b/lib/ansible/modules/extras/monitoring/bigpanda new file mode 100644 index 00000000000..11950287078 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/bigpanda @@ -0,0 +1,172 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' +--- +module: bigpanda +author: BigPanda +short_description: Notify BigPanda about deployments +version_added: "1.8" +description: + - Notify BigPanda when deployments start and end (successfully or not). Returns a deployment object containing all the parameters for future module calls. +options: + component: + description: + - "The name of the component being deployed. Ex: billing" + required: true + alias: name + version: + description: + - The deployment version. + required: true + token: + description: + - API token. + required: true + state: + description: + - State of the deployment. + required: true + choices: ['started', 'finished', 'failed'] + hosts: + description: + - Name of affected host name. Can be a list. + required: false + default: machine's hostname + alias: host + env: + description: + - The environment name, typically 'production', 'staging', etc. + required: false + owner: + description: + - The person responsible for the deployment. + required: false + description: + description: + - Free text description of the deployment. + required: false + url: + description: + - Base URL of the API server. + required: False + default: https://api.bigpanda.io + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +''' + +EXAMPLES = ''' +- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started +... 
+- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=finished + +or using a deployment object: +- bigpanda: component=myapp version=1.3 token={{ bigpanda_token }} state=started + register: deployment + +- bigpanda: state=finished + args: deployment + +If outside servers aren't reachable from your machine, use local_action and pass the hostname: +- local_action: bigpanda component=myapp version=1.3 hosts={{ansible_hostname}} token={{ bigpanda_token }} state=started + register: deployment +... +- local_action: bigpanda state=finished + args: deployment +''' + +# =========================================== +# Module execution. +# +import socket + +def main(): + + module = AnsibleModule( + argument_spec=dict( + component=dict(required=True, aliases=['name']), + version=dict(required=True), + token=dict(required=True), + state=dict(required=True, choices=['started', 'finished', 'failed']), + hosts=dict(required=False, default=[socket.gethostname()], aliases=['host']), + env=dict(required=False), + owner=dict(required=False), + description=dict(required=False), + message=dict(required=False), + source_system=dict(required=False, default='ansible'), + validate_certs=dict(default='yes', type='bool'), + url=dict(required=False, default='https://api.bigpanda.io'), + ), + supports_check_mode=True, + check_invalid_arguments=False, + ) + + token = module.params['token'] + state = module.params['state'] + url = module.params['url'] + + # Build the common request body + body = dict() + for k in ('component', 'version', 'hosts'): + v = module.params[k] + if v is not None: + body[k] = v + + if not isinstance(body['hosts'], list): + body['hosts'] = [body['hosts']] + + # Insert state-specific attributes to body + if state == 'started': + for k in ('source_system', 'env', 'owner', 'description'): + v = module.params[k] + if v is not None: + body[k] = v + + request_url = url + '/data/events/deployments/start' + else: + message = module.params['message'] + if message is not None: + body['errorMessage'] = message + + if state == 'finished': + body['status'] = 'success' + else: + body['status'] = 'failure' + + request_url = url + '/data/events/deployments/end' + + # Build the deployment object we return + deployment = dict(token=token, url=url) + deployment.update(body) + if 'errorMessage' in deployment: + message = deployment.pop('errorMessage') + deployment['message'] = message + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True, **deployment) + + # Send the data to bigpanda + data = json.dumps(body) + headers = {'Authorization':'Bearer %s' % token, 'Content-Type':'application/json'} + try: + response, info = fetch_url(module, request_url, data=data, headers=headers) + if info['status'] == 200: + module.exit_json(changed=True, **deployment) + else: + module.fail_json(msg=json.dumps(info)) + except Exception as e: + module.fail_json(msg=str(e)) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/boundary_meter b/lib/ansible/modules/extras/monitoring/boundary_meter new file mode 100644 index 00000000000..da739d4306f --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/boundary_meter @@ -0,0 +1,256 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to add boundary meters. 
+ +(c) 2013, curtis + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +import json +import datetime +import base64 +import os + +DOCUMENTATION = ''' + +module: boundary_meter +short_description: Manage boundary meters +description: + - This module manages boundary meters +version_added: "1.3" +author: curtis@serverascode.com +requirements: + - Boundary API access + - bprobe is required to send data, but not to register a meter + - Python urllib2 +options: + name: + description: + - meter name + required: true + state: + description: + - Whether to create or remove the client from boundary + required: false + default: true + choices: ["present", "absent"] + apiid: + description: + - Organizations boundary API ID + required: true + apikey: + description: + - Organizations boundary API KEY + required: true + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + +notes: + - This module does not yet support boundary tags. + +''' + +EXAMPLES=''' +- name: Create meter + boundary_meter: apiid=AAAAAA api_key=BBBBBB state=present name={{ inventory_hostname }}" + +- name: Delete meter + boundary_meter: apiid=AAAAAA api_key=BBBBBB state=absent name={{ inventory_hostname }}" + +''' + +api_host = "api.boundary.com" +config_directory = "/etc/bprobe" + +# "resource" like thing or apikey? 
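+# The helpers below build the Basic-auth header and the REST URLs used against
+# api.boundary.com. A minimal illustration (the key is a placeholder, not a
+# real credential):
+#
+#   auth_encode('abc123')  ->  'YWJjMTIz'
+#   http_request() then sends the header:  Authorization: Basic YWJjMTIz
+#
+# base64.standard_b64encode() does not insert newlines, so the unassigned
+# auth.replace("\n", "") call in auth_encode() is effectively a no-op.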
+def auth_encode(apikey): + auth = base64.standard_b64encode(apikey) + auth.replace("\n", "") + return auth + +def build_url(name, apiid, action, meter_id=None, cert_type=None): + if action == "create": + return 'https://%s/%s/meters' % (api_host, apiid) + elif action == "search": + return "https://%s/%s/meters?name=%s" % (api_host, apiid, name) + elif action == "certificates": + return "https://%s/%s/meters/%s/%s.pem" % (api_host, apiid, meter_id, cert_type) + elif action == "tags": + return "https://%s/%s/meters/%s/tags" % (api_host, apiid, meter_id) + elif action == "delete": + return "https://%s/%s/meters/%s" % (api_host, apiid, meter_id) + +def http_request(module, name, apiid, apikey, action, data=None, meter_id=None, cert_type=None): + + if meter_id is None: + url = build_url(name, apiid, action) + else: + if cert_type is None: + url = build_url(name, apiid, action, meter_id) + else: + url = build_url(name, apiid, action, meter_id, cert_type) + + headers = dict() + headers["Authorization"] = "Basic %s" % auth_encode(apikey) + headers["Content-Type"] = "application/json" + + return fetch_url(module, url, data=data, headers=headers) + +def create_meter(module, name, apiid, apikey): + + meters = search_meter(module, name, apiid, apikey) + + if len(meters) > 0: + # If the meter already exists, do nothing + module.exit_json(status="Meter " + name + " already exists",changed=False) + else: + # If it doesn't exist, create it + body = '{"name":"' + name + '"}' + response, info = http_request(module, name, apiid, apikey, data=body, action="create") + + if info['status'] != 200: + module.fail_json(msg="Failed to connect to api host to create meter") + + # If the config directory doesn't exist, create it + if not os.path.exists(config_directory): + try: + os.makedirs(config_directory) + except: + module.fail_json("Could not create " + config_directory) + + + # Download both cert files from the api host + types = ['key', 'cert'] + for cert_type in types: + try: + # If we can't open the file it's not there, so we should download it + cert_file = open('%s/%s.pem' % (config_directory,cert_type)) + except IOError: + # Now download the file... 
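+                # download_request() (defined further down) asks the Boundary API
+                # for the meter's <cert_type>.pem and writes it to
+                # /etc/bprobe/<cert_type>.pem with mode 0600.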
+ rc = download_request(module, name, apiid, apikey, cert_type) + if rc == False: + module.fail_json("Download request for " + cert_type + ".pem failed") + + return 0, "Meter " + name + " created" + +def search_meter(module, name, apiid, apikey): + + response, info = http_request(module, name, apiid, apikey, action="search") + + if info['status'] != 200: + module.fail_json("Failed to connect to api host to search for meter") + + # Return meters + return json.loads(response.read()) + +def get_meter_id(module, name, apiid, apikey): + # In order to delete the meter we need its id + meters = search_meter(module, name, apiid, apikey) + + if len(meters) > 0: + return meters[0]['id'] + else: + return None + +def delete_meter(module, name, apiid, apikey): + + meter_id = get_meter_id(module, name, apiid, apikey) + + if meter_id is None: + return 1, "Meter does not exist, so can't delete it" + else: + response, info = http_request(module, name, apiid, apikey, action, meter_id) + if info['status'] != 200: + module.fail_json("Failed to delete meter") + + # Each new meter gets a new key.pem and ca.pem file, so they should be deleted + types = ['cert', 'key'] + for cert_type in types: + try: + cert_file = '%s/%s.pem' % (config_directory,cert_type) + os.remove(cert_file) + except OSError, e: + module.fail_json("Failed to remove " + cert_type + ".pem file") + + return 0, "Meter " + name + " deleted" + +def download_request(module, name, apiid, apikey, cert_type): + + meter_id = get_meter_id(module, name, apiid, apikey) + + if meter_id is not None: + action = "certificates" + response, info = http_request(module, name, apiid, apikey, action, meter_id, cert_type) + if info['status'] != 200: + module.fail_json("Failed to connect to api host to download certificate") + + if result: + try: + cert_file_path = '%s/%s.pem' % (config_directory,cert_type) + body = response.read() + cert_file = open(cert_file_path, 'w') + cert_file.write(body) + cert_file.close + os.chmod(cert_file_path, 0o600) + except: + module.fail_json("Could not write to certificate file") + + return True + else: + module.fail_json("Could not get meter id") + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['present', 'absent']), + name=dict(required=False), + apikey=dict(required=True), + apiid=dict(required=True), + validate_certs = dict(default='yes', type='bool'), + ) + ) + + state = module.params['state'] + name= module.params['name'] + apikey = module.params['api_key'] + apiid = module.params['api_id'] + + if state == "present": + (rc, result) = create_meter(module, name, apiid, apikey) + + if state == "absent": + (rc, result) = delete_meter(module, name, apiid, apikey) + + if rc != 0: + module.fail_json(msg=result) + + module.exit_json(status=result,changed=True) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() + diff --git a/lib/ansible/modules/extras/monitoring/datadog_event b/lib/ansible/modules/extras/monitoring/datadog_event new file mode 100644 index 00000000000..5d38dd4c31d --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/datadog_event @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Author: Artūras 'arturaz' Šlajus +# +# This module is proudly sponsored by iGeolise (www.igeolise.com) and +# Tiny Lab Productions (www.tinylabproductions.com). 
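+#
+# post_event() below sends a JSON body to
+# https://app.datadoghq.com/api/v1/events?api_key=... via fetch_url().
+# An illustrative payload, built the same way the code builds it
+# (values are placeholders):
+#
+#   {"title": "Testing from ansible", "text": "Test!", "priority": "low",
+#    "alert_type": "info", "tags": ["aa", "bb"], "source_type_name": "my apps"}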
+ +DOCUMENTATION = ''' +--- +module: datadog_event +short_description: Posts events to DataDog service +description: +- "Allows to post events to DataDog (www.datadoghq.com) service." +- "Uses http://docs.datadoghq.com/api/#events API." +version_added: "1.3" +author: Artūras 'arturaz' Šlajus +notes: [] +requirements: [urllib2] +options: + api_key: + description: ["Your DataDog API key."] + required: true + default: null + title: + description: ["The event title."] + required: true + default: null + text: + description: ["The body of the event."] + required: true + default: null + date_happened: + description: + - POSIX timestamp of the event. + - Default value is now. + required: false + default: now + priority: + description: ["The priority of the event."] + required: false + default: normal + choices: [normal, low] + tags: + description: ["Comma separated list of tags to apply to the event."] + required: false + default: null + alert_type: + description: ["Type of alert."] + required: false + default: info + choices: ['error', 'warning', 'info', 'success'] + aggregation_key: + description: ["An arbitrary string to use for aggregation."] + required: false + default: null + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 +''' + +EXAMPLES = ''' +# Post an event with low priority +datadog_event: title="Testing from ansible" text="Test!" priority="low" + api_key="6873258723457823548234234234" +# Post an event with several tags +datadog_event: title="Testing from ansible" text="Test!" + api_key="6873258723457823548234234234" + tags=aa,bb,cc +''' + +import socket + +def main(): + module = AnsibleModule( + argument_spec=dict( + api_key=dict(required=True), + title=dict(required=True), + text=dict(required=True), + date_happened=dict(required=False, default=None, type='int'), + priority=dict( + required=False, default='normal', choices=['normal', 'low'] + ), + tags=dict(required=False, default=None), + alert_type=dict( + required=False, default='info', + choices=['error', 'warning', 'info', 'success'] + ), + aggregation_key=dict(required=False, default=None), + source_type_name=dict( + required=False, default='my apps', + choices=['nagios', 'hudson', 'jenkins', 'user', 'my apps', + 'feed', 'chef', 'puppet', 'git', 'bitbucket', 'fabric', + 'capistrano'] + ), + validate_certs = dict(default='yes', type='bool'), + ) + ) + + post_event(module) + +def post_event(module): + uri = "https://app.datadoghq.com/api/v1/events?api_key=%s" % module.params['api_key'] + + body = dict( + title=module.params['title'], + text=module.params['text'], + priority=module.params['priority'], + alert_type=module.params['alert_type'] + ) + if module.params['date_happened'] != None: + body['date_happened'] = module.params['date_happened'] + if module.params['tags'] != None: + body['tags'] = module.params['tags'].split(",") + if module.params['aggregation_key'] != None: + body['aggregation_key'] = module.params['aggregation_key'] + if module.params['source_type_name'] != None: + body['source_type_name'] = module.params['source_type_name'] + + json_body = module.jsonify(body) + headers = {"Content-Type": "application/json"} + + (response, info) = fetch_url(module, uri, data=json_body, headers=headers) + if info['status'] == 200: + response_body = response.read() + response_json = module.from_json(response_body) + if 
response_json['status'] == 'ok': + module.exit_json(changed=True) + else: + module.fail_json(msg=response) + else: + module.fail_json(**info) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/librato_annotation b/lib/ansible/modules/extras/monitoring/librato_annotation new file mode 100644 index 00000000000..63979f41bfb --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/librato_annotation @@ -0,0 +1,169 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (C) Seth Edwards, 2014 +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +import base64 + +DOCUMENTATION = ''' +--- +module: librato_annotation +short_description: create an annotation in librato +description: + - Create an annotation event on the given annotation stream :name. If the annotation stream does not exist, it will be created automatically +version_added: "1.6" +author: Seth Edwards +requirements: + - urllib2 + - base64 +options: + user: + description: + - Librato account username + required: true + api_key: + description: + - Librato account api key + required: true + name: + description: + - The annotation stream name + - If the annotation stream does not exist, it will be created automatically + required: false + title: + description: + - The title of an annotation is a string and may contain spaces + - The title should be a short, high-level summary of the annotation e.g. v45 Deployment + required: true + source: + description: + - A string which describes the originating source of an annotation when that annotation is tracked across multiple members of a population + required: false + description: + description: + - The description contains extra meta-data about a particular annotation + - The description should contain specifics on the individual annotation e.g. Deployed 9b562b2 shipped new feature foo! 
+ required: false + start_time: + description: + - The unix timestamp indicating the the time at which the event referenced by this annotation started + required: false + end_time: + description: + - The unix timestamp indicating the the time at which the event referenced by this annotation ended + - For events that have a duration, this is a useful way to annotate the duration of the event + required: false + links: + description: + - See examples + required: true +''' + +EXAMPLES = ''' +# Create a simple annotation event with a source +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXX + title: 'App Config Change' + source: 'foo.bar' + description: 'This is a detailed description of the config change' + +# Create an annotation that includes a link +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: 'code.deploy' + title: 'app code deploy' + description: 'this is a detailed description of a deployment' + links: + - { rel: 'example', href: 'http://www.example.com/deploy' } + +# Create an annotation with a start_time and end_time +- librato_annotation: + user: user@example.com + api_key: XXXXXXXXXXXXXXXXXX + name: 'maintenance' + title: 'Maintenance window' + description: 'This is a detailed description of maintenance' + start_time: 1395940006 + end_time: 1395954406 +''' + + +try: + import urllib2 + HAS_URLLIB2 = True +except ImportError: + HAS_URLLIB2 = False + +def post_annotation(module): + user = module.params['user'] + api_key = module.params['api_key'] + name = module.params['name'] + title = module.params['title'] + + url = 'https://metrics-api.librato.com/v1/annotations/%s' % name + params = {} + params['title'] = title + + if module.params['source'] != None: + params['source'] = module.params['source'] + if module.params['description'] != None: + params['description'] = module.params['description'] + if module.params['start_time'] != None: + params['start_time'] = module.params['start_time'] + if module.params['end_time'] != None: + params['end_time'] = module.params['end_time'] + if module.params['links'] != None: + params['links'] = module.params['links'] + + json_body = module.jsonify(params) + + headers = {} + headers['Content-Type'] = 'application/json' + headers['Authorization'] = b"Basic " + base64.b64encode(user + b":" + api_key).strip() + req = urllib2.Request(url, json_body, headers) + try: + response = urllib2.urlopen(req) + except urllib2.HTTPError as e: + module.fail_json(msg="Request Failed", reason=e.reason) + response = response.read() + module.exit_json(changed=True, annotation=response) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + user = dict(required=True), + api_key = dict(required=True), + name = dict(required=False), + title = dict(required=True), + source = dict(required=False), + description = dict(required=False), + start_time = dict(required=False, default=None, type='int'), + end_time = dict(require=False, default=None, type='int'), + links = dict(type='list') + ) + ) + + post_annotation(module) + +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/monitoring/logentries b/lib/ansible/modules/extras/monitoring/logentries new file mode 100644 index 00000000000..373f4f777ff --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/logentries @@ -0,0 +1,130 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Ivan Vanderbyl +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU 
General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: logentries +author: Ivan Vanderbyl +short_description: Module for tracking logs via logentries.com +description: + - Sends logs to LogEntries in realtime +version_added: "1.6" +options: + path: + description: + - path to a log file + required: true + state: + description: + - following state of the log + choices: [ 'present', 'absent' ] + required: false + default: present +notes: + - Requires the LogEntries agent which can be installed following the instructions at logentries.com +''' +EXAMPLES = ''' +- logentries: path=/var/log/nginx/access.log state=present +- logentries: path=/var/log/nginx/error.log state=absent +''' + +def query_log_status(module, le_path, path, state="present"): + """ Returns whether a log is followed or not. """ + + if state == "present": + rc, out, err = module.run_command("%s followed %s" % (le_path, path)) + if rc == 0: + return True + + return False + +def follow_log(module, le_path, logs): + """ Follows one or more logs if not already followed. """ + + followed_count = 0 + + for log in logs: + if query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command([le_path, 'follow', log]) + + if not query_log_status(module, le_path, log): + module.fail_json(msg="failed to follow '%s': %s" % (log, err.strip())) + + followed_count += 1 + + if followed_count > 0: + module.exit_json(changed=True, msg="followed %d log(s)" % (followed_count,)) + + module.exit_json(changed=False, msg="logs(s) already followed") + +def unfollow_log(module, le_path, logs): + """ Unfollows one or more logs if followed. """ + + removed_count = 0 + + # Using a for loop incase of error, we can report the package that failed + for log in logs: + # Query the log first, to see if we even need to remove. 
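+        # query_log_status() shells out to `le followed <path>` and treats a
+        # zero exit status as "already followed"; paths that are not currently
+        # followed are skipped here.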
+ if not query_log_status(module, le_path, log): + continue + + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command([le_path, 'rm', log]) + + if query_log_status(module, le_path, log): + module.fail_json(msg="failed to remove '%s': %s" % (log, err.strip())) + + removed_count += 1 + + if removed_count > 0: + module.exit_json(changed=True, msg="removed %d package(s)" % removed_count) + + module.exit_json(changed=False, msg="logs(s) already unfollowed") + +def main(): + module = AnsibleModule( + argument_spec = dict( + path = dict(aliases=["name"], required=True), + state = dict(default="present", choices=["present", "followed", "absent", "unfollowed"]) + ), + supports_check_mode=True + ) + + le_path = module.get_bin_path('le', True, ['/usr/local/bin']) + + p = module.params + + # Handle multiple log files + logs = p["path"].split(",") + logs = filter(None, logs) + + if p["state"] in ["present", "followed"]: + follow_log(module, le_path, logs) + + elif p["state"] in ["absent", "unfollowed"]: + unfollow_log(module, le_path, logs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/monit b/lib/ansible/modules/extras/monitoring/monit new file mode 100644 index 00000000000..558f1e696f2 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/monit @@ -0,0 +1,155 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Darryl Stoflet +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: monit +short_description: Manage the state of a program monitored via Monit +description: + - Manage the state of a program monitored via I(Monit) +version_added: "1.2" +options: + name: + description: + - The name of the I(monit) program/process to manage + required: true + default: null + state: + description: + - The state of service + required: true + default: null + choices: [ "present", "started", "stopped", "restarted", "monitored", "unmonitored", "reloaded" ] +requirements: [ ] +author: Darryl Stoflet +''' + +EXAMPLES = ''' +# Manage the state of program "httpd" to be in "started" state. 
+- monit: name=httpd state=started +''' + +def main(): + arg_spec = dict( + name=dict(required=True), + state=dict(required=True, choices=['present', 'started', 'restarted', 'stopped', 'monitored', 'unmonitored', 'reloaded']) + ) + + module = AnsibleModule(argument_spec=arg_spec, supports_check_mode=True) + + name = module.params['name'] + state = module.params['state'] + + MONIT = module.get_bin_path('monit', True) + + if state == 'reloaded': + if module.check_mode: + module.exit_json(changed=True) + rc, out, err = module.run_command('%s reload' % MONIT) + if rc != 0: + module.fail_json(msg='monit reload failed', stdout=out, stderr=err) + module.exit_json(changed=True, name=name, state=state) + + def status(): + """Return the status of the process in monit, or the empty string if not present.""" + rc, out, err = module.run_command('%s summary' % MONIT, check_rc=True) + for line in out.split('\n'): + # Sample output lines: + # Process 'name' Running + # Process 'name' Running - restart pending + parts = line.lower().split() + if len(parts) > 2 and parts[0] == 'process' and parts[1] == "'%s'" % name: + return ' '.join(parts[2:]) + else: + return '' + + def run_command(command): + """Runs a monit command, and returns the new status.""" + module.run_command('%s %s %s' % (MONIT, command, name), check_rc=True) + return status() + + present = status() != '' + + if not present and not state == 'present': + module.fail_json(msg='%s process not presently configured with monit' % name, name=name, state=state) + + if state == 'present': + if not present: + if module.check_mode: + module.exit_json(changed=True) + status = run_command('reload') + if status == '': + module.fail_json(msg='%s process not configured with monit' % name, name=name, state=state) + else: + module.exit_json(changed=True, name=name, state=state) + module.exit_json(changed=False, name=name, state=state) + + running = 'running' in status() + + if running and state in ['started', 'monitored']: + module.exit_json(changed=False, name=name, state=state) + + if running and state == 'stopped': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('stop') + if status in ['not monitored'] or 'stop pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not stopped' % name, status=status) + + if running and state == 'unmonitored': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('unmonitor') + if status in ['not monitored']: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not unmonitored' % name, status=status) + + elif state == 'restarted': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('restart') + if status in ['initializing', 'running'] or 'restart pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not restarted' % name, status=status) + + elif not running and state == 'started': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('start') + if status in ['initializing', 'running'] or 'start pending' in status: + module.exit_json(changed=True, name=name, state=state) + module.fail_json(msg='%s process not started' % name, status=status) + + elif not running and state == 'monitored': + if module.check_mode: + module.exit_json(changed=True) + status = run_command('monitor') + if status not in ['not monitored']: + module.exit_json(changed=True, name=name, 
state=state) + module.fail_json(msg='%s process not monitored' % name, status=status) + + module.exit_json(changed=False, name=name, state=state) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/nagios b/lib/ansible/modules/extras/monitoring/nagios new file mode 100644 index 00000000000..9219766b86a --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/nagios @@ -0,0 +1,880 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# This file is largely copied from the Nagios module included in the +# Func project. Original copyright follows: +# +# func-nagios - Schedule downtime and enables/disable notifications +# Copyright 2011, Red Hat, Inc. +# Tim Bielawa +# +# This software may be freely redistributed under the terms of the GNU +# general public license version 2. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +DOCUMENTATION = ''' +--- +module: nagios +short_description: Perform common tasks in Nagios related to downtime and notifications. +description: + - "The M(nagios) module has two basic functions: scheduling downtime and toggling alerts for services or hosts." + - All actions require the I(host) parameter to be given explicitly. In playbooks you can use the C({{inventory_hostname}}) variable to refer to the host the playbook is currently running on. + - You can specify multiple services at once by separating them with commas, .e.g., C(services=httpd,nfs,puppet). + - When specifying what service to handle there is a special service value, I(host), which will handle alerts/downtime for the I(host itself), e.g., C(service=host). This keyword may not be given with other services at the same time. I(Setting alerts/downtime for a host does not affect alerts/downtime for any of the services running on it.) To schedule downtime for all services on particular host use keyword "all", e.g., C(service=all). + - When using the M(nagios) module you will need to specify your Nagios server using the C(delegate_to) parameter. +version_added: "0.7" +options: + action: + description: + - Action to take. + required: true + default: null + choices: [ "downtime", "enable_alerts", "disable_alerts", "silence", "unsilence", + "silence_nagios", "unsilence_nagios", "command" ] + host: + description: + - Host to operate on in Nagios. + required: false + default: null + cmdfile: + description: + - Path to the nagios I(command file) (FIFO pipe). + Only required if auto-detection fails. + required: false + default: auto-detected + author: + description: + - Author to leave downtime comments as. + Only usable with the C(downtime) action. + required: false + default: Ansible + minutes: + description: + - Minutes to schedule downtime for. + - Only usable with the C(downtime) action. + required: false + default: 30 + services: + description: + - What to manage downtime/alerts for. Separate multiple services with commas. + C(service) is an alias for C(services). + B(Required) option when using the C(downtime), C(enable_alerts), and C(disable_alerts) actions. + aliases: [ "service" ] + required: true + default: null + command: + description: + - The raw command to send to nagios, which + should not include the submitted time header or the line-feed + B(Required) option when using the C(command) action. 
+ required: true + default: null + +author: Tim Bielawa +requirements: [ "Nagios" ] +''' + +EXAMPLES = ''' +# set 30 minutes of apache downtime +- nagios: action=downtime minutes=30 service=httpd host={{ inventory_hostname }} + +# schedule an hour of HOST downtime +- nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + +# schedule downtime for ALL services on HOST +- nagios: action=downtime minutes=45 service=all host={{ inventory_hostname }} + +# schedule downtime for a few services +- nagios: action=downtime services=frob,foobar,qeuz host={{ inventory_hostname }} + +# enable SMART disk alerts +- nagios: action=enable_alerts service=smart host={{ inventory_hostname }} + +# "two services at once: disable httpd and nfs alerts" +- nagios: action=disable_alerts service=httpd,nfs host={{ inventory_hostname }} + +# disable HOST alerts +- nagios: action=disable_alerts service=host host={{ inventory_hostname }} + +# silence ALL alerts +- nagios: action=silence host={{ inventory_hostname }} + +# unsilence all alerts +- nagios: action=unsilence host={{ inventory_hostname }} + +# SHUT UP NAGIOS +- nagios: action=silence_nagios + +# ANNOY ME NAGIOS +- nagios: action=unsilence_nagios + +# command something +- nagios: action=command command='DISABLE_FAILURE_PREDICTION' +''' + +import ConfigParser +import types +import time +import os.path + +###################################################################### + + +def which_cmdfile(): + locations = [ + # rhel + '/etc/nagios/nagios.cfg', + # debian + '/etc/nagios3/nagios.cfg', + # older debian + '/etc/nagios2/nagios.cfg', + # bsd, solaris + '/usr/local/etc/nagios/nagios.cfg', + # groundwork it monitoring + '/usr/local/groundwork/nagios/etc/nagios.cfg', + # open monitoring distribution + '/omd/sites/oppy/tmp/nagios/nagios.cfg', + # ??? + '/usr/local/nagios/etc/nagios.cfg', + '/usr/local/nagios/nagios.cfg', + '/opt/nagios/etc/nagios.cfg', + '/opt/nagios/nagios.cfg', + # icinga on debian/ubuntu + '/etc/icinga/icinga.cfg', + # icinga installed from source (default location) + '/usr/local/icinga/etc/icinga.cfg', + ] + + for path in locations: + if os.path.exists(path): + for line in open(path): + if line.startswith('command_file'): + return line.split('=')[1].strip() + + return None + +###################################################################### + + +def main(): + ACTION_CHOICES = [ + 'downtime', + 'silence', + 'unsilence', + 'enable_alerts', + 'disable_alerts', + 'silence_nagios', + 'unsilence_nagios', + 'command', + ] + + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True, default=None, choices=ACTION_CHOICES), + author=dict(default='Ansible'), + host=dict(required=False, default=None), + minutes=dict(default=30), + cmdfile=dict(default=which_cmdfile()), + services=dict(default=None, aliases=['service']), + command=dict(required=False, default=None), + ) + ) + + action = module.params['action'] + host = module.params['host'] + minutes = module.params['minutes'] + services = module.params['services'] + cmdfile = module.params['cmdfile'] + command = module.params['command'] + + ################################################################## + # Required args per action: + # downtime = (minutes, service, host) + # (un)silence = (host) + # (enable/disable)_alerts = (service, host) + # command = command + # + # AnsibleModule will verify most stuff, we need to verify + # 'minutes' and 'service' manually. 
+ + ################################################################## + if action not in ['command', 'silence_nagios', 'unsilence_nagios']: + if not host: + module.fail_json(msg='no host specified for action requiring one') + ###################################################################### + if action == 'downtime': + # Make sure there's an actual service selected + if not services: + module.fail_json(msg='no service selected to set downtime for') + # Make sure minutes is a number + try: + m = int(minutes) + if not isinstance(m, types.IntType): + module.fail_json(msg='minutes must be a number') + except Exception: + module.fail_json(msg='invalid entry for minutes') + + ################################################################## + if action in ['enable_alerts', 'disable_alerts']: + if not services: + module.fail_json(msg='a service is required when setting alerts') + + if action in ['command']: + if not command: + module.fail_json(msg='no command passed for command action') + ################################################################## + if not cmdfile: + module.fail_json('unable to locate nagios.cfg') + + ################################################################## + ansible_nagios = Nagios(module, **module.params) + if module.check_mode: + module.exit_json(changed=True) + else: + ansible_nagios.act() + ################################################################## + + +###################################################################### +class Nagios(object): + """ + Perform common tasks in Nagios related to downtime and + notifications. + + The complete set of external commands Nagios handles is documented + on their website: + + http://old.nagios.org/developerinfo/externalcommands/commandlist.php + + Note that in the case of `schedule_svc_downtime`, + `enable_svc_notifications`, and `disable_svc_notifications`, the + service argument should be passed as a list. + """ + + def __init__(self, module, **kwargs): + self.module = module + self.action = kwargs['action'] + self.author = kwargs['author'] + self.host = kwargs['host'] + self.minutes = int(kwargs['minutes']) + self.cmdfile = kwargs['cmdfile'] + self.command = kwargs['command'] + + if (kwargs['services'] is None) or (kwargs['services'] == 'host') or (kwargs['services'] == 'all'): + self.services = kwargs['services'] + else: + self.services = kwargs['services'].split(',') + + self.command_results = [] + + def _now(self): + """ + The time in seconds since 12:00:00AM Jan 1, 1970 + """ + + return int(time.time()) + + def _write_command(self, cmd): + """ + Write the given command to the Nagios command file + """ + + try: + fp = open(self.cmdfile, 'w') + fp.write(cmd) + fp.flush() + fp.close() + self.command_results.append(cmd.strip()) + except IOError: + self.module.fail_json(msg='unable to write to nagios command file', + cmdfile=self.cmdfile) + + def _fmt_dt_str(self, cmd, host, duration, author=None, + comment="Scheduling downtime", start=None, + svc=None, fixed=1, trigger=0): + """ + Format an external-command downtime string. 
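+
+        For example, scheduling 30 minutes of downtime for service 'httpd' on
+        host 'web1' produces a command string like (timestamps illustrative):
+
+            [1430000000] SCHEDULE_SVC_DOWNTIME;web1;httpd;1430000000;1430001800;1;0;1800;Ansible;Scheduling downtime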
+ + cmd - Nagios command ID + host - Host schedule downtime on + duration - Minutes to schedule downtime for + author - Name to file the downtime as + comment - Reason for running this command (upgrade, reboot, etc) + start - Start of downtime in seconds since 12:00AM Jan 1 1970 + Default is to use the entry time (now) + svc - Service to schedule downtime for, omit when for host downtime + fixed - Start now if 1, start when a problem is detected if 0 + trigger - Optional ID of event to start downtime from. Leave as 0 for + fixed downtime. + + Syntax: [submitted] COMMAND;;[] + ;;;;;; + + """ + + entry_time = self._now() + if start is None: + start = entry_time + + hdr = "[%s] %s;%s;" % (entry_time, cmd, host) + duration_s = (duration * 60) + end = start + duration_s + + if not author: + author = self.author + + if svc is not None: + dt_args = [svc, str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + else: + # Downtime for a host if no svc specified + dt_args = [str(start), str(end), str(fixed), str(trigger), + str(duration_s), author, comment] + + dt_arg_str = ";".join(dt_args) + dt_str = hdr + dt_arg_str + "\n" + + return dt_str + + def _fmt_notif_str(self, cmd, host=None, svc=None): + """ + Format an external-command notification string. + + cmd - Nagios command ID. + host - Host to en/disable notifications on.. A value is not required + for global downtime + svc - Service to schedule downtime for. A value is not required + for host downtime. + + Syntax: [submitted] COMMAND;[;] + """ + + entry_time = self._now() + notif_str = "[%s] %s" % (entry_time, cmd) + if host is not None: + notif_str += ";%s" % host + + if svc is not None: + notif_str += ";%s" % svc + + notif_str += "\n" + + return notif_str + + def schedule_svc_downtime(self, host, services=[], minutes=30): + """ + This command is used to schedule downtime for a particular + service. + + During the specified downtime, Nagios will not send + notifications out about the service. + + Syntax: SCHEDULE_SVC_DOWNTIME;; + ;;;;;; + + """ + + cmd = "SCHEDULE_SVC_DOWNTIME" + for service in services: + dt_cmd_str = self._fmt_dt_str(cmd, host, minutes, svc=service) + self._write_command(dt_cmd_str) + + def schedule_host_downtime(self, host, minutes=30): + """ + This command is used to schedule downtime for a particular + host. + + During the specified downtime, Nagios will not send + notifications out about the host. + + Syntax: SCHEDULE_HOST_DOWNTIME;;;; + ;;;; + """ + + cmd = "SCHEDULE_HOST_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) + self._write_command(dt_cmd_str) + + def schedule_host_svc_downtime(self, host, minutes=30): + """ + This command is used to schedule downtime for + all services associated with a particular host. + + During the specified downtime, Nagios will not send + notifications out about the host. + + SCHEDULE_HOST_SVC_DOWNTIME;;;; + ;;;; + """ + + cmd = "SCHEDULE_HOST_SVC_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, host, minutes) + self._write_command(dt_cmd_str) + + def schedule_hostgroup_host_downtime(self, hostgroup, minutes=30): + """ + This command is used to schedule downtime for all hosts in a + particular hostgroup. + + During the specified downtime, Nagios will not send + notifications out about the hosts. 
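+
+        An illustrative resulting command (timestamps are placeholders):
+
+            [1430000000] SCHEDULE_HOSTGROUP_HOST_DOWNTIME;webservers;1430000000;1430001800;1;0;1800;Ansible;Scheduling downtime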
+ + Syntax: SCHEDULE_HOSTGROUP_HOST_DOWNTIME;;; + ;;;;; + """ + + cmd = "SCHEDULE_HOSTGROUP_HOST_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) + self._write_command(dt_cmd_str) + + def schedule_hostgroup_svc_downtime(self, hostgroup, minutes=30): + """ + This command is used to schedule downtime for all services in + a particular hostgroup. + + During the specified downtime, Nagios will not send + notifications out about the services. + + Note that scheduling downtime for services does not + automatically schedule downtime for the hosts those services + are associated with. + + Syntax: SCHEDULE_HOSTGROUP_SVC_DOWNTIME;;; + ;;;;; + """ + + cmd = "SCHEDULE_HOSTGROUP_SVC_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, hostgroup, minutes) + self._write_command(dt_cmd_str) + + def schedule_servicegroup_host_downtime(self, servicegroup, minutes=30): + """ + This command is used to schedule downtime for all hosts in a + particular servicegroup. + + During the specified downtime, Nagios will not send + notifications out about the hosts. + + Syntax: SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;; + ;;;;;; + + """ + + cmd = "SCHEDULE_SERVICEGROUP_HOST_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) + self._write_command(dt_cmd_str) + + def schedule_servicegroup_svc_downtime(self, servicegroup, minutes=30): + """ + This command is used to schedule downtime for all services in + a particular servicegroup. + + During the specified downtime, Nagios will not send + notifications out about the services. + + Note that scheduling downtime for services does not + automatically schedule downtime for the hosts those services + are associated with. + + Syntax: SCHEDULE_SERVICEGROUP_SVC_DOWNTIME;; + ;;;;;; + + """ + + cmd = "SCHEDULE_SERVICEGROUP_SVC_DOWNTIME" + dt_cmd_str = self._fmt_dt_str(cmd, servicegroup, minutes) + self._write_command(dt_cmd_str) + + def disable_host_svc_notifications(self, host): + """ + This command is used to prevent notifications from being sent + out for all services on the specified host. + + Note that this command does not disable notifications from + being sent out about the host. + + Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; + """ + + cmd = "DISABLE_HOST_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, host) + self._write_command(notif_str) + + def disable_host_notifications(self, host): + """ + This command is used to prevent notifications from being sent + out for the specified host. + + Note that this command does not disable notifications for + services associated with this host. + + Syntax: DISABLE_HOST_NOTIFICATIONS; + """ + + cmd = "DISABLE_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, host) + self._write_command(notif_str) + + def disable_svc_notifications(self, host, services=[]): + """ + This command is used to prevent notifications from being sent + out for the specified service. + + Note that this command does not disable notifications from + being sent out about the host. + + Syntax: DISABLE_SVC_NOTIFICATIONS;; + """ + + cmd = "DISABLE_SVC_NOTIFICATIONS" + for service in services: + notif_str = self._fmt_notif_str(cmd, host, svc=service) + self._write_command(notif_str) + + def disable_servicegroup_host_notifications(self, servicegroup): + """ + This command is used to prevent notifications from being sent + out for all hosts in the specified servicegroup. + + Note that this command does not disable notifications for + services associated with hosts in this service group. 
+ + Syntax: DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS; + """ + + cmd = "DISABLE_SERVICEGROUP_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, servicegroup) + self._write_command(notif_str) + + def disable_servicegroup_svc_notifications(self, servicegroup): + """ + This command is used to prevent notifications from being sent + out for all services in the specified servicegroup. + + Note that this does not prevent notifications from being sent + out about the hosts in this servicegroup. + + Syntax: DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS; + """ + + cmd = "DISABLE_SERVICEGROUP_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, servicegroup) + self._write_command(notif_str) + + def disable_hostgroup_host_notifications(self, hostgroup): + """ + Disables notifications for all hosts in a particular + hostgroup. + + Note that this does not disable notifications for the services + associated with the hosts in the hostgroup - see the + DISABLE_HOSTGROUP_SVC_NOTIFICATIONS command for that. + + Syntax: DISABLE_HOSTGROUP_HOST_NOTIFICATIONS; + """ + + cmd = "DISABLE_HOSTGROUP_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, hostgroup) + self._write_command(notif_str) + + def disable_hostgroup_svc_notifications(self, hostgroup): + """ + Disables notifications for all services associated with hosts + in a particular hostgroup. + + Note that this does not disable notifications for the hosts in + the hostgroup - see the DISABLE_HOSTGROUP_HOST_NOTIFICATIONS + command for that. + + Syntax: DISABLE_HOSTGROUP_SVC_NOTIFICATIONS; + """ + + cmd = "DISABLE_HOSTGROUP_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, hostgroup) + self._write_command(notif_str) + + def enable_host_notifications(self, host): + """ + Enables notifications for a particular host. + + Note that this command does not enable notifications for + services associated with this host. + + Syntax: ENABLE_HOST_NOTIFICATIONS; + """ + + cmd = "ENABLE_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, host) + self._write_command(notif_str) + + def enable_host_svc_notifications(self, host): + """ + Enables notifications for all services on the specified host. + + Note that this does not enable notifications for the host. + + Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; + """ + + cmd = "ENABLE_HOST_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, host) + nagios_return = self._write_command(notif_str) + + if nagios_return: + return notif_str + else: + return "Fail: could not write to the command file" + + def enable_svc_notifications(self, host, services=[]): + """ + Enables notifications for a particular service. + + Note that this does not enable notifications for the host. + + Syntax: ENABLE_SVC_NOTIFICATIONS;; + """ + + cmd = "ENABLE_SVC_NOTIFICATIONS" + nagios_return = True + return_str_list = [] + for service in services: + notif_str = self._fmt_notif_str(cmd, host, svc=service) + nagios_return = self._write_command(notif_str) and nagios_return + return_str_list.append(notif_str) + + if nagios_return: + return return_str_list + else: + return "Fail: could not write to the command file" + + def enable_hostgroup_host_notifications(self, hostgroup): + """ + Enables notifications for all hosts in a particular hostgroup. + + Note that this command does not enable notifications for + services associated with the hosts in this hostgroup. 
+ + Syntax: ENABLE_HOSTGROUP_HOST_NOTIFICATIONS; + """ + + cmd = "ENABLE_HOSTGROUP_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, hostgroup) + nagios_return = self._write_command(notif_str) + + if nagios_return: + return notif_str + else: + return "Fail: could not write to the command file" + + def enable_hostgroup_svc_notifications(self, hostgroup): + """ + Enables notifications for all services that are associated + with hosts in a particular hostgroup. + + Note that this does not enable notifications for the hosts in + this hostgroup. + + Syntax: ENABLE_HOSTGROUP_SVC_NOTIFICATIONS; + """ + + cmd = "ENABLE_HOSTGROUP_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, hostgroup) + nagios_return = self._write_command(notif_str) + + if nagios_return: + return notif_str + else: + return "Fail: could not write to the command file" + + def enable_servicegroup_host_notifications(self, servicegroup): + """ + Enables notifications for all hosts that have services that + are members of a particular servicegroup. + + Note that this command does not enable notifications for + services associated with the hosts in this servicegroup. + + Syntax: ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS; + """ + + cmd = "ENABLE_SERVICEGROUP_HOST_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, servicegroup) + nagios_return = self._write_command(notif_str) + + if nagios_return: + return notif_str + else: + return "Fail: could not write to the command file" + + def enable_servicegroup_svc_notifications(self, servicegroup): + """ + Enables notifications for all services that are members of a + particular servicegroup. + + Note that this does not enable notifications for the hosts in + this servicegroup. + + Syntax: ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS; + """ + + cmd = "ENABLE_SERVICEGROUP_SVC_NOTIFICATIONS" + notif_str = self._fmt_notif_str(cmd, servicegroup) + nagios_return = self._write_command(notif_str) + + if nagios_return: + return notif_str + else: + return "Fail: could not write to the command file" + + def silence_host(self, host): + """ + This command is used to prevent notifications from being sent + out for the host and all services on the specified host. + + This is equivalent to calling disable_host_svc_notifications + and disable_host_notifications. + + Syntax: DISABLE_HOST_SVC_NOTIFICATIONS; + Syntax: DISABLE_HOST_NOTIFICATIONS; + """ + + cmd = [ + "DISABLE_HOST_SVC_NOTIFICATIONS", + "DISABLE_HOST_NOTIFICATIONS" + ] + nagios_return = True + return_str_list = [] + for c in cmd: + notif_str = self._fmt_notif_str(c, host) + nagios_return = self._write_command(notif_str) and nagios_return + return_str_list.append(notif_str) + + if nagios_return: + return return_str_list + else: + return "Fail: could not write to the command file" + + def unsilence_host(self, host): + """ + This command is used to enable notifications for the host and + all services on the specified host. + + This is equivalent to calling enable_host_svc_notifications + and enable_host_notifications. 
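+
+        Returns the list of commands that were written on success, or an
+        error string if the command file could not be written.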
+ + Syntax: ENABLE_HOST_SVC_NOTIFICATIONS; + Syntax: ENABLE_HOST_NOTIFICATIONS; + """ + + cmd = [ + "ENABLE_HOST_SVC_NOTIFICATIONS", + "ENABLE_HOST_NOTIFICATIONS" + ] + nagios_return = True + return_str_list = [] + for c in cmd: + notif_str = self._fmt_notif_str(c, host) + nagios_return = self._write_command(notif_str) and nagios_return + return_str_list.append(notif_str) + + if nagios_return: + return return_str_list + else: + return "Fail: could not write to the command file" + + def silence_nagios(self): + """ + This command is used to disable notifications for all hosts and services + in nagios. + + This is a 'SHUT UP, NAGIOS' command + """ + cmd = 'DISABLE_NOTIFICATIONS' + self._write_command(self._fmt_notif_str(cmd)) + + def unsilence_nagios(self): + """ + This command is used to enable notifications for all hosts and services + in nagios. + + This is a 'OK, NAGIOS, GO'' command + """ + cmd = 'ENABLE_NOTIFICATIONS' + self._write_command(self._fmt_notif_str(cmd)) + + def nagios_cmd(self, cmd): + """ + This sends an arbitrary command to nagios + + It prepends the submitted time and appends a \n + + You just have to provide the properly formatted command + """ + + pre = '[%s]' % int(time.time()) + + post = '\n' + cmdstr = '%s %s %s' % (pre, cmd, post) + self._write_command(cmdstr) + + def act(self): + """ + Figure out what you want to do from ansible, and then do the + needful (at the earliest). + """ + # host or service downtime? + if self.action == 'downtime': + if self.services == 'host': + self.schedule_host_downtime(self.host, self.minutes) + elif self.services == 'all': + self.schedule_host_svc_downtime(self.host, self.minutes) + else: + self.schedule_svc_downtime(self.host, + services=self.services, + minutes=self.minutes) + + # toggle the host AND service alerts + elif self.action == 'silence': + self.silence_host(self.host) + + elif self.action == 'unsilence': + self.unsilence_host(self.host) + + # toggle host/svc alerts + elif self.action == 'enable_alerts': + if self.services == 'host': + self.enable_host_notifications(self.host) + else: + self.enable_svc_notifications(self.host, + services=self.services) + + elif self.action == 'disable_alerts': + if self.services == 'host': + self.disable_host_notifications(self.host) + else: + self.disable_svc_notifications(self.host, + services=self.services) + elif self.action == 'silence_nagios': + self.silence_nagios() + + elif self.action == 'unsilence_nagios': + self.unsilence_nagios() + + elif self.action == 'command': + self.nagios_cmd(self.command) + + # wtf? + else: + self.module.fail_json(msg="unknown action specified: '%s'" % \ + self.action) + + self.module.exit_json(nagios_commands=self.command_results, + changed=True) + +###################################################################### +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/monitoring/newrelic_deployment b/lib/ansible/modules/extras/monitoring/newrelic_deployment new file mode 100644 index 00000000000..93d55832fd3 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/newrelic_deployment @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Matt Coddington +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: newrelic_deployment +version_added: "1.2" +author: Matt Coddington +short_description: Notify newrelic about app deployments +description: + - Notify newrelic about app deployments (see http://newrelic.github.io/newrelic_api/NewRelicApi/Deployment.html) +options: + token: + description: + - API token. + required: true + app_name: + description: + - (one of app_name or application_id are required) The value of app_name in the newrelic.yml file used by the application + required: false + application_id: + description: + - (one of app_name or application_id are required) The application id, found in the URL when viewing the application in RPM + required: false + changelog: + description: + - A list of changes for this deployment + required: false + description: + description: + - Text annotation for the deployment - notes for you + required: false + revision: + description: + - A revision number (e.g., git commit SHA) + required: false + user: + description: + - The name of the user/process that triggered this deployment + required: false + appname: + description: + - Name of the application + required: false + environment: + description: + - The environment for this deployment + required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +''' + +EXAMPLES = ''' +- newrelic_deployment: token=AAAAAA + app_name=myapp + user='ansible deployment' + revision=1.0 +''' + +# =========================================== +# Module execution. 
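+# The main() function below simply form-encodes the collected parameters and
+# submits them to the New Relic deployments endpoint, passing the API token
+# in the x-api-key header. A roughly equivalent request, shown here only as
+# an illustration (placeholder values):
+#
+#   curl -H "x-api-key: $TOKEN" \
+#        -d "app_name=myapp" -d "revision=1.0" \
+#        https://rpm.newrelic.com/deployments.xml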
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + app_name=dict(required=False), + application_id=dict(required=False), + changelog=dict(required=False), + description=dict(required=False), + revision=dict(required=False), + user=dict(required=False), + appname=dict(required=False), + environment=dict(required=False), + validate_certs = dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + # build list of params + params = {} + if module.params["app_name"] and module.params["application_id"]: + module.fail_json(msg="only one of 'app_name' or 'application_id' can be set") + + if module.params["app_name"]: + params["app_name"] = module.params["app_name"] + elif module.params["application_id"]: + params["application_id"] = module.params["application_id"] + else: + module.fail_json(msg="you must set one of 'app_name' or 'application_id'") + + for item in [ "changelog", "description", "revision", "user", "appname", "environment" ]: + if module.params[item]: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=True) + + # Send the data to NewRelic + url = "https://rpm.newrelic.com/deployments.xml" + data = urllib.urlencode(params) + headers = { + 'x-api-key': module.params["token"], + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] in (200, 201): + module.exit_json(changed=True) + else: + module.fail_json(msg="unable to update newrelic: %s" % info['msg']) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() + diff --git a/lib/ansible/modules/extras/monitoring/pagerduty b/lib/ansible/modules/extras/monitoring/pagerduty new file mode 100644 index 00000000000..5ca33717dc9 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/pagerduty @@ -0,0 +1,232 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' + +module: pagerduty +short_description: Create PagerDuty maintenance windows +description: + - This module will let you create PagerDuty maintenance windows +version_added: "1.2" +author: Justin Johns +requirements: + - PagerDuty API access +options: + state: + description: + - Create a maintenance window or get a list of ongoing windows. + required: true + default: null + choices: [ "running", "started", "ongoing" ] + aliases: [] + name: + description: + - PagerDuty unique subdomain. + required: true + default: null + choices: [] + aliases: [] + user: + description: + - PagerDuty user ID. + required: true + default: null + choices: [] + aliases: [] + passwd: + description: + - PagerDuty user password. + required: true + default: null + choices: [] + aliases: [] + token: + description: + - A pagerduty token, generated on the pagerduty site. Can be used instead of + user/passwd combination. + required: true + default: null + choices: [] + aliases: [] + version_added: '1.8' + requester_id: + description: + - ID of user making the request. Only needed when using a token and creating a maintenance_window. + required: true + default: null + choices: [] + aliases: [] + version_added: '1.8' + service: + description: + - PagerDuty service ID. + required: false + default: null + choices: [] + aliases: [] + hours: + description: + - Length of maintenance window in hours. + required: false + default: 1 + choices: [] + aliases: [] + minutes: + description: + - Maintenance window in minutes (this is added to the hours). 
+ required: false + default: 0 + choices: [] + aliases: [] + version_added: '1.8' + desc: + description: + - Short description of maintenance window. + required: false + default: Created by Ansible + choices: [] + aliases: [] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + +notes: + - This module does not yet have support to end maintenance windows. +''' + +EXAMPLES=''' +# List ongoing maintenance windows using a user/passwd +- pagerduty: name=companyabc user=example@example.com passwd=password123 state=ongoing + +# List ongoing maintenance windows using a token +- pagerduty: name=companyabc token=xxxxxxxxxxxxxx state=ongoing + +# Create a 1 hour maintenance window for service FOO123, using a user/passwd +- pagerduty: name=companyabc + user=example@example.com + passwd=password123 + state=running + service=FOO123 + +# Create a 5 minute maintenance window for service FOO123, using a token +- pagerduty: name=companyabc + token=xxxxxxxxxxxxxx + hours=0 + minutes=5 + state=running + service=FOO123 + + +# Create a 4 hour maintenance window for service FOO123 with the description "deployment". +- pagerduty: name=companyabc + user=example@example.com + passwd=password123 + state=running + service=FOO123 + hours=4 + desc=deployment +''' + +import json +import datetime +import base64 + +def auth_header(user, passwd, token): + if token: + return "Token token=%s" % token + + auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + return "Basic %s" % auth + +def ongoing(module, name, user, passwd, token): + url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows/ongoing" + headers = {"Authorization": auth_header(user, passwd, token)} + + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + module.fail_json(msg="failed to lookup the ongoing window: %s" % info['msg']) + + return False, response.read() + + +def create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc): + now = datetime.datetime.utcnow() + later = now + datetime.timedelta(hours=int(hours), minutes=int(minutes)) + start = now.strftime("%Y-%m-%dT%H:%M:%SZ") + end = later.strftime("%Y-%m-%dT%H:%M:%SZ") + + url = "https://" + name + ".pagerduty.com/api/v1/maintenance_windows" + headers = { + 'Authorization': auth_header(user, passwd, token), + 'Content-Type' : 'application/json', + } + request_data = {'maintenance_window': {'start_time': start, 'end_time': end, 'description': desc, 'service_ids': [service]}} + if requester_id: + request_data['requester_id'] = requester_id + else: + if token: + module.fail_json(msg="requester_id is required when using a token") + + data = json.dumps(request_data) + response, info = fetch_url(module, url, data=data, headers=headers, method='POST') + if info['status'] != 200: + module.fail_json(msg="failed to create the window: %s" % info['msg']) + + return False, response.read() + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'started', 'ongoing']), + name=dict(required=True), + user=dict(required=False), + passwd=dict(required=False), + token=dict(required=False), + service=dict(required=False), + requester_id=dict(required=False), + hours=dict(default='1', required=False), + minutes=dict(default='0', required=False), + 
desc=dict(default='Created by Ansible', required=False), + validate_certs = dict(default='yes', type='bool'), + ) + ) + + state = module.params['state'] + name = module.params['name'] + user = module.params['user'] + passwd = module.params['passwd'] + token = module.params['token'] + service = module.params['service'] + hours = module.params['hours'] + minutes = module.params['minutes'] + token = module.params['token'] + desc = module.params['desc'] + requester_id = module.params['requester_id'] + + if not token and not (user or passwd): + module.fail_json(msg="neither user and passwd nor token specified") + + if state == "running" or state == "started": + if not service: + module.fail_json(msg="service not specified") + (rc, out) = create(module, name, user, passwd, token, requester_id, service, hours, minutes, desc) + + if state == "ongoing": + (rc, out) = ongoing(module, name, user, passwd, token) + + if rc != 0: + module.fail_json(msg="failed", result=out) + + module.exit_json(msg="success", result=out) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/pingdom b/lib/ansible/modules/extras/monitoring/pingdom new file mode 100644 index 00000000000..6f658cd9505 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/pingdom @@ -0,0 +1,135 @@ +#!/usr/bin/python + +DOCUMENTATION = ''' + +module: pingdom +short_description: Pause/unpause Pingdom alerts +description: + - This module will let you pause/unpause Pingdom alerts +version_added: "1.2" +author: Justin Johns +requirements: + - "This pingdom python library: https://github.com/mbabineau/pingdom-python" +options: + state: + description: + - Define whether or not the check should be running or paused. + required: true + default: null + choices: [ "running", "paused" ] + aliases: [] + checkid: + description: + - Pingdom ID of the check. + required: true + default: null + choices: [] + aliases: [] + uid: + description: + - Pingdom user ID. + required: true + default: null + choices: [] + aliases: [] + passwd: + description: + - Pingdom user password. + required: true + default: null + choices: [] + aliases: [] + key: + description: + - Pingdom API key. + required: true + default: null + choices: [] + aliases: [] +notes: + - This module does not yet have support to add/remove checks. +''' + +EXAMPLES = ''' +# Pause the check with the ID of 12345. +- pingdom: uid=example@example.com + passwd=password123 + key=apipassword123 + checkid=12345 + state=paused + +# Unpause the check with the ID of 12345. 
+- pingdom: uid=example@example.com + passwd=password123 + key=apipassword123 + checkid=12345 + state=running +''' + +try: + import pingdom + HAS_PINGDOM = True +except: + HAS_PINGDOM = False + + + +def pause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=True) + check = c.get_check(checkid) + name = check.name + result = check.status + #if result != "paused": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def unpause(checkid, uid, passwd, key): + + c = pingdom.PingdomConnection(uid, passwd, key) + c.modify_check(checkid, paused=False) + check = c.get_check(checkid) + name = check.name + result = check.status + #if result != "up": # api output buggy - accept raw exception for now + # return (True, name, result) + return (False, name, result) + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, choices=['running', 'paused', 'started', 'stopped']), + checkid=dict(required=True), + uid=dict(required=True), + passwd=dict(required=True), + key=dict(required=True) + ) + ) + + if not HAS_PINGDOM: + module.fail_json(msg="Missing requried pingdom module (check docs)") + + checkid = module.params['checkid'] + state = module.params['state'] + uid = module.params['uid'] + passwd = module.params['passwd'] + key = module.params['key'] + + if (state == "paused" or state == "stopped"): + (rc, name, result) = pause(checkid, uid, passwd, key) + + if (state == "running" or state == "started"): + (rc, name, result) = unpause(checkid, uid, passwd, key) + + if rc != 0: + module.fail_json(checkid=checkid, name=name, status=result) + + module.exit_json(checkid=checkid, name=name, status=result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/monitoring/rollbar_deployment b/lib/ansible/modules/extras/monitoring/rollbar_deployment new file mode 100644 index 00000000000..772e78fc5c2 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/rollbar_deployment @@ -0,0 +1,133 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2014, Max Riveiro, +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: rollbar_deployment +version_added: 1.6 +author: Max Riveiro +short_description: Notify Rollbar about app deployments +description: + - Notify Rollbar about app deployments + (see https://rollbar.com/docs/deploys_other/) +options: + token: + description: + - Your project access token. + required: true + environment: + description: + - Name of the environment being deployed, e.g. 'production'. + required: true + revision: + description: + - Revision number/sha being deployed. + required: true + user: + description: + - User who deployed. + required: false + rollbar_user: + description: + - Rollbar username of the user who deployed. 
+ required: false + comment: + description: + - Deploy comment (e.g. what is being deployed). + required: false + url: + description: + - Optional URL to submit the notification to. + required: false + default: 'https://api.rollbar.com/api/1/deploy/' + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. + This should only be used on personally controlled sites using + self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] +''' + +EXAMPLES = ''' +- rollbar_deployment: token=AAAAAA + environment='staging' + user='ansible' + revision=4.2, + rollbar_user='admin', + comment='Test Deploy' +''' + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + environment=dict(required=True), + revision=dict(required=True), + user=dict(required=False), + rollbar_user=dict(required=False), + comment=dict(required=False), + url=dict( + required=False, + default='https://api.rollbar.com/api/1/deploy/' + ), + validate_certs=dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + if module.check_mode: + module.exit_json(changed=True) + + params = dict( + access_token=module.params['token'], + environment=module.params['environment'], + revision=module.params['revision'] + ) + + if module.params['user']: + params['local_username'] = module.params['user'] + + if module.params['rollbar_user']: + params['rollbar_username'] = module.params['rollbar_user'] + + if module.params['comment']: + params['comment'] = module.params['comment'] + + url = module.params.get('url') + + try: + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + except Exception, e: + module.fail_json(msg='Unable to notify Rollbar: %s' % e) + else: + if info['status'] == 200: + module.exit_json(changed=True) + else: + module.fail_json(msg='HTTP result code: %d connecting to %s' % (info['status'], url)) + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/stackdriver b/lib/ansible/modules/extras/monitoring/stackdriver new file mode 100644 index 00000000000..c36964dd9d2 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/stackdriver @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' + +module: stackdriver +short_description: Send code deploy and annotation events to stackdriver +description: + - Send code deploy and annotation events to Stackdriver +version_added: "1.6" +author: Ben Whaley +options: + key: + description: + - API key. + required: true + default: null + event: + description: + - The type of event to send, either annotation or deploy + choices: ['annotation', 'deploy'] + required: false + default: null + revision_id: + description: + - The revision of the code that was deployed. Required for deploy events + required: false + default: null + deployed_by: + description: + - The person or robot responsible for deploying the code + required: false + default: "Ansible" + deployed_to: + description: + - "The environment code was deployed to. (ie: development, staging, production)" + required: false + default: null + repository: + description: + - The repository (or project) deployed + required: false + default: null + msg: + description: + - The contents of the annotation message, in plain text.  Limited to 256 characters. Required for annotation. 
+ required: false + default: null + annotated_by: + description: + - The person or robot who the annotation should be attributed to. + required: false + default: "Ansible" + level: + description: + - one of INFO/WARN/ERROR, defaults to INFO if not supplied.  May affect display. + choices: ['INFO', 'WARN', 'ERROR'] + required: false + default: 'INFO' + instance_id: + description: + - id of an EC2 instance that this event should be attached to, which will limit the contexts where this event is shown + required: false + default: null + event_epoch: + description: + - "Unix timestamp of where the event should appear in the timeline, defaults to now. Be careful with this." + required: false + default: null +''' + +EXAMPLES = ''' +- stackdriver: key=AAAAAA event=deploy deployed_to=production deployed_by=leeroyjenkins repository=MyWebApp revision_id=abcd123 + +- stackdriver: key=AAAAAA event=annotation msg="Greetings from Ansible" annotated_by=leeroyjenkins level=WARN instance_id=i-abcd1234 +''' + +# =========================================== +# Stackdriver module specific support methods. +# +try: + import json +except ImportError: + import simplejson as json + +def send_deploy_event(module, key, revision_id, deployed_by='Ansible', deployed_to=None, repository=None): + """Send a deploy event to Stackdriver""" + deploy_api = "https://event-gateway.stackdriver.com/v1/deployevent" + + params = {} + params['revision_id'] = revision_id + params['deployed_by'] = deployed_by + if deployed_to: + params['deployed_to'] = deployed_to + if repository: + params['repository'] = repository + + return do_send_request(module, deploy_api, params, key) + +def send_annotation_event(module, key, msg, annotated_by='Ansible', level=None, instance_id=None, event_epoch=None): + """Send an annotation event to Stackdriver""" + annotation_api = "https://event-gateway.stackdriver.com/v1/annotationevent" + + params = {} + params['message'] = msg + if annotated_by: + params['annotated_by'] = annotated_by + if level: + params['level'] = level + if instance_id: + params['instance_id'] = instance_id + if event_epoch: + params['event_epoch'] = event_epoch + + return do_send_request(module, annotation_api, params, key) + +def do_send_request(module, url, params, key): + data = json.dumps(params) + headers = { + 'Content-Type': 'application/json', + 'x-stackdriver-apikey': key + } + response, info = fetch_url(module, url, headers=headers, data=data, method='POST') + if info['status'] != 200: + module.fail_json(msg="Unable to send msg: %s" % info['msg']) + + +# =========================================== +# Module execution. 
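+# do_send_request() above JSON-encodes the event parameters and POSTs them to
+# the Stackdriver event gateway with the API key in the x-stackdriver-apikey
+# header. For the deploy example above, the request is roughly (illustration
+# only):
+#
+#   POST https://event-gateway.stackdriver.com/v1/deployevent
+#   Content-Type: application/json
+#   x-stackdriver-apikey: AAAAAA
+#
+#   {"revision_id": "abcd123", "deployed_by": "leeroyjenkins",
+#    "deployed_to": "production", "repository": "MyWebApp"}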
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + key=dict(required=True), + event=dict(required=True, choices=['deploy', 'annotation']), + msg=dict(), + revision_id=dict(), + annotated_by=dict(default='Ansible'), + level=dict(default='INFO', choices=['INFO', 'WARN', 'ERROR']), + instance_id=dict(), + event_epoch=dict(), + deployed_by=dict(default='Ansible'), + deployed_to=dict(), + repository=dict(), + ), + supports_check_mode=True + ) + + key = module.params["key"] + event = module.params["event"] + + # Annotation params + msg = module.params["msg"] + annotated_by = module.params["annotated_by"] + level = module.params["level"] + instance_id = module.params["instance_id"] + event_epoch = module.params["event_epoch"] + + # Deploy params + revision_id = module.params["revision_id"] + deployed_by = module.params["deployed_by"] + deployed_to = module.params["deployed_to"] + repository = module.params["repository"] + + ################################################################## + # deploy requires revision_id + # annotation requires msg + # We verify these manually + ################################################################## + + if event == 'deploy': + if not revision_id: + module.fail_json(msg="revision_id required for deploy events") + try: + send_deploy_event(module, key, revision_id, deployed_by, deployed_to, repository) + except Exception, e: + module.fail_json(msg="unable to sent deploy event: %s" % e) + + if event == 'annotation': + if not msg: + module.fail_json(msg="msg required for annotation events") + try: + send_annotation_event(module, key, msg, annotated_by, level, instance_id, event_epoch) + except Exception, e: + module.fail_json(msg="unable to sent annotation event: %s" % e) + + changed = True + module.exit_json(changed=changed, deployed_by=deployed_by) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/monitoring/zabbix_maintenance b/lib/ansible/modules/extras/monitoring/zabbix_maintenance new file mode 100644 index 00000000000..e27091e0739 --- /dev/null +++ b/lib/ansible/modules/extras/monitoring/zabbix_maintenance @@ -0,0 +1,371 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Bulimov +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' + +module: zabbix_maintenance +short_description: Create Zabbix maintenance windows +description: + - This module will let you create Zabbix maintenance windows. +version_added: "1.8" +author: Alexander Bulimov +requirements: + - zabbix-api python module +options: + state: + description: + - Create or remove a maintenance window. + required: true + default: null + choices: [ "present", "absent" ] + server_url: + description: + - Url of Zabbix server, with protocol (http or https). + C(url) is an alias for C(server_url). 
+ required: true + default: null + aliases: [ "url" ] + login_user: + description: + - Zabbix user name. + required: true + default: null + login_password: + description: + - Zabbix user password. + required: true + default: null + host_names: + description: + - Hosts to manage maintenance window for. + Separate multiple hosts with commas. + C(host_name) is an alias for C(host_names). + B(Required) option when C(state) is I(present) + and no C(host_groups) specified. + required: false + default: null + aliases: [ "host_name" ] + host_groups: + description: + - Host groups to manage maintenance window for. + Separate multiple groups with commas. + C(host_group) is an alias for C(host_groups). + B(Required) option when C(state) is I(present) + and no C(host_names) specified. + required: false + default: null + aliases: [ "host_group" ] + minutes: + description: + - Length of maintenance window in minutes. + required: false + default: 10 + name: + description: + - Unique name of maintenance window. + required: true + default: null + desc: + description: + - Short description of maintenance window. + required: true + default: Created by Ansible + collect_data: + description: + - Type of maintenance. With data collection, or without. + required: false + default: "true" +notes: + - Useful for setting hosts in maintenance mode before big update, + and removing maintenance window after update. + - Module creates maintenance window from now() to now() + minutes, + so if Zabbix server's time and host's time are not synchronized, + you will get strange results. + - Install required module with 'pip install zabbix-api' command. + - Checks existance only by maintenance name. +''' + +EXAMPLES = ''' +# Create maintenance window named "Update of www1" +# for host www1.example.com for 90 minutes +- zabbix_maintenance: name="Update of www1" + host_name=www1.example.com + state=present + minutes=90 + server_url=https://monitoring.example.com + login_user=ansible + login_password=pAsSwOrD + +# Create maintenance window named "Mass update" +# for host www1.example.com and host groups Office and Dev +- zabbix_maintenance: name="Update of www1" + host_name=www1.example.com + host_groups=Office,Dev + state=present + server_url=https://monitoring.example.com + login_user=ansible + login_password=pAsSwOrD + +# Create maintenance window named "update" +# for hosts www1.example.com and db1.example.com and without data collection. 
+- zabbix_maintenance: name=update + host_names=www1.example.com,db1.example.com + state=present + collect_data=false + server_url=https://monitoring.example.com + login_user=ansible + login_password=pAsSwOrD + +# Remove maintenance window named "Test1" +- zabbix_maintenance: name=Test1 + state=absent + server_url=https://monitoring.example.com + login_user=ansible + login_password=pAsSwOrD +''' + +import datetime +import time + +try: + from zabbix_api import ZabbixAPI + HAS_ZABBIX_API = True +except ImportError: + HAS_ZABBIX_API = False + + +def create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc): + end_time = start_time + period + try: + zbx.maintenance.create( + { + "groupids": group_ids, + "hostids": host_ids, + "name": name, + "maintenance_type": maintenance_type, + "active_since": str(start_time), + "active_till": str(end_time), + "description": desc, + "timeperiods": [{ + "timeperiod_type": "0", + "start_date": str(start_time), + "period": str(period), + }] + } + ) + except BaseException as e: + return 1, None, str(e) + return 0, None, None + + +def get_maintenance_id(zbx, name): + try: + result = zbx.maintenance.get( + { + "filter": + { + "name": name, + } + } + ) + except BaseException as e: + return 1, None, str(e) + + maintenance_ids = [] + for res in result: + maintenance_ids.append(res["maintenanceid"]) + + return 0, maintenance_ids, None + + +def delete_maintenance(zbx, maintenance_id): + try: + zbx.maintenance.delete(maintenance_id) + except BaseException as e: + return 1, None, str(e) + return 0, None, None + + +def check_maintenance(zbx, name): + try: + result = zbx.maintenance.exists( + { + "name": name + } + ) + except BaseException as e: + return 1, None, str(e) + return 0, result, None + + +def get_group_ids(zbx, host_groups): + group_ids = [] + for group in host_groups: + try: + result = zbx.hostgroup.get( + { + "output": "extend", + "filter": + { + "name": group + } + } + ) + except BaseException as e: + return 1, None, str(e) + + if not result: + return 1, None, "Group id for group %s not found" % group + + group_ids.append(result[0]["groupid"]) + + return 0, group_ids, None + + +def get_host_ids(zbx, host_names): + host_ids = [] + for host in host_names: + try: + result = zbx.host.get( + { + "output": "extend", + "filter": + { + "name": host + } + } + ) + except BaseException as e: + return 1, None, str(e) + + if not result: + return 1, None, "Host id for host %s not found" % host + + host_ids.append(result[0]["hostid"]) + + return 0, host_ids, None + + +def main(): + module = AnsibleModule( + argument_spec=dict( + state=dict(required=True, default=None, choices=['present', 'absent']), + server_url=dict(required=True, default=None, aliases=['url']), + host_names=dict(type='list', required=False, default=None, aliases=['host_name']), + minutes=dict(type='int', required=False, default=10), + host_groups=dict(type='list', required=False, default=None, aliases=['host_group']), + login_user=dict(required=True, default=None), + login_password=dict(required=True, default=None), + name=dict(required=True, default=None), + desc=dict(required=False, default="Created by Ansible"), + collect_data=dict(type='bool', required=False, default=True), + ), + supports_check_mode=True, + ) + + if not HAS_ZABBIX_API: + module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)") + + host_names = module.params['host_names'] + host_groups = module.params['host_groups'] + state = 
module.params['state'] + login_user = module.params['login_user'] + login_password = module.params['login_password'] + minutes = module.params['minutes'] + name = module.params['name'] + desc = module.params['desc'] + server_url = module.params['server_url'] + collect_data = module.params['collect_data'] + if collect_data: + maintenance_type = 0 + else: + maintenance_type = 1 + + try: + zbx = ZabbixAPI(server_url) + zbx.login(login_user, login_password) + except BaseException as e: + module.fail_json(msg="Failed to connect to Zabbix server: %s" % e) + + changed = False + + if state == "present": + + now = datetime.datetime.now() + start_time = time.mktime(now.timetuple()) + period = 60 * int(minutes) # N * 60 seconds + + if host_groups: + (rc, group_ids, error) = get_group_ids(zbx, host_groups) + if rc != 0: + module.fail_json(msg="Failed to get group_ids: %s" % error) + else: + group_ids = [] + + if host_names: + (rc, host_ids, error) = get_host_ids(zbx, host_names) + if rc != 0: + module.fail_json(msg="Failed to get host_ids: %s" % error) + else: + host_ids = [] + + (rc, exists, error) = check_maintenance(zbx, name) + if rc != 0: + module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error)) + + if not exists: + if not host_names and not host_groups: + module.fail_json(msg="At least one host_name or host_group must be defined for each created maintenance.") + + if module.check_mode: + changed = True + else: + (rc, _, error) = create_maintenance(zbx, group_ids, host_ids, start_time, maintenance_type, period, name, desc) + if rc == 0: + changed = True + else: + module.fail_json(msg="Failed to create maintenance: %s" % error) + + if state == "absent": + + (rc, exists, error) = check_maintenance(zbx, name) + if rc != 0: + module.fail_json(msg="Failed to check maintenance %s existance: %s" % (name, error)) + + if exists: + (rc, maintenance, error) = get_maintenance_id(zbx, name) + if rc != 0: + module.fail_json(msg="Failed to get maintenance id: %s" % error) + + if maintenance: + if module.check_mode: + changed = True + else: + (rc, _, error) = delete_maintenance(zbx, maintenance) + if rc == 0: + changed = True + else: + module.fail_json(msg="Failed to remove maintenance: %s" % error) + + module.exit_json(changed=changed) + +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/a10_server b/lib/ansible/modules/extras/net_infrastructure/a10_server new file mode 100644 index 00000000000..65410536eef --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/a10_server @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage A10 Networks slb server objects +(c) 2014, Mischa Peters + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . 
+""" + +DOCUMENTATION = ''' +--- +module: a10_server +version_added: 1.8 +short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices +description: + - Manage slb server objects on A10 Networks devices via aXAPI +author: Mischa Peters +notes: + - Requires A10 Networks aXAPI 2.1 +options: + host: + description: + - hostname or ip of your A10 Networks device + required: true + default: null + aliases: [] + choices: [] + username: + description: + - admin account of your A10 Networks device + required: true + default: null + aliases: ['user', 'admin'] + choices: [] + password: + description: + - admin password of your A10 Networks device + required: true + default: null + aliases: ['pass', 'pwd'] + choices: [] + server_name: + description: + - slb server name + required: true + default: null + aliases: ['server'] + choices: [] + server_ip: + description: + - slb server IP address + required: false + default: null + aliases: ['ip', 'address'] + choices: [] + server_status: + description: + - slb virtual server status + required: false + default: enable + aliases: ['status'] + choices: ['enabled', 'disabled'] + server_ports: + description: + - A list of ports to create for the server. Each list item should be a + dictionary which specifies the C(port:) and C(protocol:), but can also optionally + specify the C(status:). See the examples below for details. This parameter is + required when C(state) is C(present). + required: false + default: null + aliases: [] + choices: [] + state: + description: + - create, update or remove slb server + required: false + default: present + aliases: [] + choices: ['present', 'absent'] +''' + +EXAMPLES = ''' +# Create a new server +- a10_server: + host: a10.mydomain.com + username: myadmin + password: mypassword + server: test + server_ip: 1.1.1.100 + server_ports: + - port_num: 8080 + protocol: tcp + - port_num: 8443 + protocol: TCP + +''' + +VALID_PORT_FIELDS = ['port_num', 'protocol', 'status'] + +def validate_ports(module, ports): + for item in ports: + for key in item: + if key not in VALID_PORT_FIELDS: + module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) + + # validate the port number is present and an integer + if 'port_num' in item: + try: + item['port_num'] = int(item['port_num']) + except: + module.fail_json(msg="port_num entries in the port definitions must be integers") + else: + module.fail_json(msg="port definitions must define the port_num field") + + # validate the port protocol is present, and convert it to + # the internal API integer value (and validate it) + if 'protocol' in item: + protocol = axapi_get_port_protocol(item['protocol']) + if not protocol: + module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_PORT_PROTOCOLS)) + else: + item['protocol'] = protocol + else: + module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_PORT_PROTOCOLS)) + + # convert the status to the internal API integer value + if 'status' in item: + item['status'] = axapi_enabled_disabled(item['status']) + else: + item['status'] = 1 + + +def main(): + argument_spec = a10_argument_spec() + argument_spec.update(url_argument_spec()) + argument_spec.update( + dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + server_name=dict(type='str', aliases=['server'], required=True), + server_ip=dict(type='str', aliases=['ip', 'address']), + server_status=dict(type='str', default='enabled', aliases=['status'], 
choices=['enabled', 'disabled']), + server_ports=dict(type='list', aliases=['port'], default=[]), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + username = module.params['username'] + password = module.params['password'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_server = module.params['server_name'] + slb_server_ip = module.params['server_ip'] + slb_server_status = module.params['server_status'] + slb_server_ports = module.params['server_ports'] + + if slb_server is None: + module.fail_json(msg='server_name is required') + + axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host + session_url = axapi_authenticate(module, axapi_base_url, username, password) + + # validate the ports data structure + validate_ports(module, slb_server_ports) + + json_post = { + 'server': { + 'name': slb_server, + 'host': slb_server_ip, + 'status': axapi_enabled_disabled(slb_server_status), + 'port_list': slb_server_ports, + } + } + + slb_server_data = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) + slb_server_exists = not axapi_failure(slb_server_data) + + changed = False + if state == 'present': + if not slb_server_ip: + module.fail_json(msg='you must specify an IP address when creating a server') + + if not slb_server_exists: + result = axapi_call(module, session_url + '&method=slb.server.create', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg="failed to create the server: %s" % result['response']['err']['msg']) + changed = True + else: + def needs_update(src_ports, dst_ports): + ''' + Checks to determine if the port definitions of the src_ports + array are in or different from those in dst_ports. If there is + a difference, this function returns true, otherwise false. 
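+                For example, a src_ports entry for port 8080 with no matching
+                port_num in dst_ports makes this function return True.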
+ ''' + for src_port in src_ports: + found = False + different = False + for dst_port in dst_ports: + if src_port['port_num'] == dst_port['port_num']: + found = True + for valid_field in VALID_PORT_FIELDS: + if src_port[valid_field] != dst_port[valid_field]: + different = True + break + if found or different: + break + if not found or different: + return True + # every port from the src exists in the dst, and none of them were different + return False + + defined_ports = slb_server_data.get('server', {}).get('port_list', []) + + # we check for a needed update both ways, in case ports + # are missing from either the ones specified by the user + # or from those on the device + if needs_update(defined_ports, slb_server_ports) or needs_update(slb_server_ports, defined_ports): + result = axapi_call(module, session_url + '&method=slb.server.update', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg="failed to update the server: %s" % result['response']['err']['msg']) + changed = True + + # if we changed things, get the full info regarding + # the service group for the return data below + if changed: + result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': slb_server})) + else: + result = slb_server_data + elif state == 'absent': + if slb_server_exists: + result = axapi_call(module, session_url + '&method=slb.server.delete', json.dumps({'name': slb_server})) + changed = True + else: + result = dict(msg="the server was not present") + + # if the config has changed, save the config unless otherwise requested + if changed and write_config: + write_result = axapi_call(module, session_url + '&method=system.action.write_memory') + if axapi_failure(write_result): + module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) + + # log out of the session nicely and exit + axapi_call(module, session_url + '&method=session.close') + module.exit_json(changed=changed, content=result) + +# standard ansible module imports +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.a10 import * + +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/a10_service_group b/lib/ansible/modules/extras/net_infrastructure/a10_service_group new file mode 100644 index 00000000000..3627e2d12b8 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/a10_service_group @@ -0,0 +1,341 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage A10 Networks slb service-group objects +(c) 2014, Mischa Peters + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . 
+""" + +DOCUMENTATION = ''' +--- +module: a10_service_group +version_added: 1.8 +short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices +description: + - Manage slb service-group objects on A10 Networks devices via aXAPI +author: Mischa Peters +notes: + - Requires A10 Networks aXAPI 2.1 + - When a server doesn't exist and is added to the service-group the server will be created +options: + host: + description: + - hostname or ip of your A10 Networks device + required: true + default: null + aliases: [] + choices: [] + username: + description: + - admin account of your A10 Networks device + required: true + default: null + aliases: ['user', 'admin'] + choices: [] + password: + description: + - admin password of your A10 Networks device + required: true + default: null + aliases: ['pass', 'pwd'] + choices: [] + service_group: + description: + - slb service-group name + required: true + default: null + aliases: ['service', 'pool', 'group'] + choices: [] + service_group_protocol: + description: + - slb service-group protocol + required: false + default: tcp + aliases: ['proto', 'protocol'] + choices: ['tcp', 'udp'] + service_group_method: + description: + - slb service-group loadbalancing method + required: false + default: round-robin + aliases: ['method'] + choices: ['round-robin', 'weighted-rr', 'least-connection', 'weighted-least-connection', 'service-least-connection', 'service-weighted-least-connection', 'fastest-response', 'least-request', 'round-robin-strict', 'src-ip-only-hash', 'src-ip-hash'] + servers: + description: + - A list of servers to add to the service group. Each list item should be a + dictionary which specifies the C(server:) and C(port:), but can also optionally + specify the C(status:). See the examples below for details. + required: false + default: null + aliases: [] + choices: [] + write_config: + description: + - If C(yes), any changes will cause a write of the running configuration + to non-volatile memory. This will save I(all) configuration changes, + including those that may have been made manually or through other modules, + so care should be taken when specifying C(yes). + required: false + default: "no" + choices: ["yes", "no"] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + +''' + +EXAMPLES = ''' +# Create a new service-group +- a10_service_group: + host: a10.mydomain.com + username: myadmin + password: mypassword + service_group: sg-80-tcp + servers: + - server: foo1.mydomain.com + port: 8080 + - server: foo2.mydomain.com + port: 8080 + - server: foo3.mydomain.com + port: 8080 + - server: foo4.mydomain.com + port: 8080 + status: disabled + +''' + +VALID_SERVICE_GROUP_FIELDS = ['name', 'protocol', 'lb_method'] +VALID_SERVER_FIELDS = ['server', 'port', 'status'] + +def validate_servers(module, servers): + for item in servers: + for key in item: + if key not in VALID_SERVER_FIELDS: + module.fail_json(msg="invalid server field (%s), must be one of: %s" % (key, ','.join(VALID_SERVER_FIELDS))) + + # validate the server name is present + if 'server' not in item: + module.fail_json(msg="server definitions must define the server field") + + # validate the port number is present and an integer + if 'port' in item: + try: + item['port'] = int(item['port']) + except: + module.fail_json(msg="server port definitions must be integers") + else: + module.fail_json(msg="server definitions must define the port field") + + # convert the status to the internal API integer value + if 'status' in item: + item['status'] = axapi_enabled_disabled(item['status']) + else: + item['status'] = 1 + + +def main(): + argument_spec = a10_argument_spec() + argument_spec.update(url_argument_spec()) + argument_spec.update( + dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + service_group=dict(type='str', aliases=['service', 'pool', 'group'], required=True), + service_group_protocol=dict(type='str', default='tcp', aliases=['proto', 'protocol'], choices=['tcp', 'udp']), + service_group_method=dict(type='str', default='round-robin', + aliases=['method'], + choices=['round-robin', + 'weighted-rr', + 'least-connection', + 'weighted-least-connection', + 'service-least-connection', + 'service-weighted-least-connection', + 'fastest-response', + 'least-request', + 'round-robin-strict', + 'src-ip-only-hash', + 'src-ip-hash']), + servers=dict(type='list', aliases=['server', 'member'], default=[]), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + username = module.params['username'] + password = module.params['password'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_service_group = module.params['service_group'] + slb_service_group_proto = module.params['service_group_protocol'] + slb_service_group_method = module.params['service_group_method'] + slb_servers = module.params['servers'] + + if slb_service_group is None: + module.fail_json(msg='service_group is required') + + axapi_base_url = 'https://' + host + '/services/rest/V2.1/?format=json' + load_balancing_methods = {'round-robin': 0, + 'weighted-rr': 1, + 'least-connection': 2, + 'weighted-least-connection': 3, + 'service-least-connection': 4, + 'service-weighted-least-connection': 5, + 'fastest-response': 6, + 'least-request': 7, + 'round-robin-strict': 8, + 'src-ip-only-hash': 14, + 'src-ip-hash': 15} + + if not slb_service_group_proto or slb_service_group_proto.lower() == 'tcp': + protocol = 2 + else: + protocol = 3 + + # validate the server data list structure + validate_servers(module, slb_servers) + + json_post = { + 'service_group': { + 'name': slb_service_group, + 'protocol': protocol, + 'lb_method': 
load_balancing_methods[slb_service_group_method], + } + } + + # first we authenticate to get a session id + session_url = axapi_authenticate(module, axapi_base_url, username, password) + + # then we check to see if the specified group exists + slb_result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) + slb_service_group_exist = not axapi_failure(slb_result) + + changed = False + if state == 'present': + # before creating/updating we need to validate that servers + # defined in the servers list exist to prevent errors + checked_servers = [] + for server in slb_servers: + result = axapi_call(module, session_url + '&method=slb.server.search', json.dumps({'name': server['server']})) + if axapi_failure(result): + module.fail_json(msg="the server %s specified in the servers list does not exist" % server['server']) + checked_servers.append(server['server']) + + if not slb_service_group_exist: + result = axapi_call(module, session_url + '&method=slb.service_group.create', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg=result['response']['err']['msg']) + changed = True + else: + # check to see if the service group definition without the + # server members is different, and update that individually + # if it needs it + do_update = False + for field in VALID_SERVICE_GROUP_FIELDS: + if json_post['service_group'][field] != slb_result['service_group'][field]: + do_update = True + break + + if do_update: + result = axapi_call(module, session_url + '&method=slb.service_group.update', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg=result['response']['err']['msg']) + changed = True + + # next we pull the defined list of servers out of the returned + # results to make it a bit easier to iterate over + defined_servers = slb_result.get('service_group', {}).get('member_list', []) + + # next we add/update new member servers from the user-specified + # list if they're different or not on the target device + for server in slb_servers: + found = False + different = False + for def_server in defined_servers: + if server['server'] == def_server['server']: + found = True + for valid_field in VALID_SERVER_FIELDS: + if server[valid_field] != def_server[valid_field]: + different = True + break + if found or different: + break + # add or update as required + server_data = { + "name": slb_service_group, + "member": server, + } + if not found: + result = axapi_call(module, session_url + '&method=slb.service_group.member.create', json.dumps(server_data)) + changed = True + elif different: + result = axapi_call(module, session_url + '&method=slb.service_group.member.update', json.dumps(server_data)) + changed = True + + # finally, remove any servers that are on the target + # device but were not specified in the list given + for server in defined_servers: + found = False + for slb_server in slb_servers: + if server['server'] == slb_server['server']: + found = True + break + # remove if not found + server_data = { + "name": slb_service_group, + "member": server, + } + if not found: + result = axapi_call(module, session_url + '&method=slb.service_group.member.delete', json.dumps(server_data)) + changed = True + + # if we changed things, get the full info regarding + # the service group for the return data below + if changed: + result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': slb_service_group})) + else: + result = slb_result + elif state == 'absent': + if 
slb_service_group_exist: + result = axapi_call(module, session_url + '&method=slb.service_group.delete', json.dumps({'name': slb_service_group})) + changed = True + else: + result = dict(msg="the service group was not present") + + # if the config has changed, save the config unless otherwise requested + if changed and write_config: + write_result = axapi_call(module, session_url + '&method=system.action.write_memory') + if axapi_failure(write_result): + module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) + + # log out of the session nicely and exit + axapi_call(module, session_url + '&method=session.close') + module.exit_json(changed=changed, content=result) + +# standard ansible module imports +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.a10 import * + +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/a10_virtual_server b/lib/ansible/modules/extras/net_infrastructure/a10_virtual_server new file mode 100644 index 00000000000..3d807c098cf --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/a10_virtual_server @@ -0,0 +1,299 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage A10 Networks slb virtual server objects +(c) 2014, Mischa Peters + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: a10_virtual_server +version_added: 1.8 +short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices +description: + - Manage slb virtual server objects on A10 Networks devices via aXAPI +author: Mischa Peters +notes: + - Requires A10 Networks aXAPI 2.1 +requirements: + - urllib2 + - re +options: + host: + description: + - hostname or ip of your A10 Networks device + required: true + default: null + aliases: [] + choices: [] + username: + description: + - admin account of your A10 Networks device + required: true + default: null + aliases: ['user', 'admin'] + choices: [] + password: + description: + - admin password of your A10 Networks device + required: true + default: null + aliases: ['pass', 'pwd'] + choices: [] + virtual_server: + description: + - slb virtual server name + required: true + default: null + aliases: ['vip', 'virtual'] + choices: [] + virtual_server_ip: + description: + - slb virtual server ip address + required: false + default: null + aliases: ['ip', 'address'] + choices: [] + virtual_server_status: + description: + - slb virtual server status + required: false + default: enable + aliases: ['status'] + choices: ['enabled', 'disabled'] + virtual_server_ports: + description: + - A list of ports to create for the virtual server. Each list item should be a + dictionary which specifies the C(port:) and C(type:), but can also optionally + specify the C(service_group:) as well as the C(status:). See the examples + below for details. This parameter is required when C(state) is C(present). 
+ required: false + write_config: + description: + - If C(yes), any changes will cause a write of the running configuration + to non-volatile memory. This will save I(all) configuration changes, + including those that may have been made manually or through other modules, + so care should be taken when specifying C(yes). + required: false + default: "no" + choices: ["yes", "no"] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled devices using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + +''' + +EXAMPLES = ''' +# Create a new virtual server +- a10_virtual_server: + host: a10.mydomain.com + username: myadmin + password: mypassword + virtual_server: vserver1 + virtual_server_ip: 1.1.1.1 + virtual_server_ports: + - port: 80 + protocol: TCP + service_group: sg-80-tcp + - port: 443 + protocol: HTTPS + service_group: sg-443-https + - port: 8080 + protocol: http + status: disabled + +''' + +VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status'] + +def validate_ports(module, ports): + for item in ports: + for key in item: + if key not in VALID_PORT_FIELDS: + module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS))) + + # validate the port number is present and an integer + if 'port' in item: + try: + item['port'] = int(item['port']) + except: + module.fail_json(msg="port definitions must be integers") + else: + module.fail_json(msg="port definitions must define the port field") + + # validate the port protocol is present, and convert it to + # the internal API integer value (and validate it) + if 'protocol' in item: + protocol = axapi_get_vport_protocol(item['protocol']) + if not protocol: + module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS)) + else: + item['protocol'] = protocol + else: + module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS)) + + # convert the status to the internal API integer value + if 'status' in item: + item['status'] = axapi_enabled_disabled(item['status']) + else: + item['status'] = 1 + + # ensure the service_group field is at least present + if 'service_group' not in item: + item['service_group'] = '' + +def main(): + argument_spec = a10_argument_spec() + argument_spec.update(url_argument_spec()) + argument_spec.update( + dict( + state=dict(type='str', default='present', choices=['present', 'absent']), + virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True), + virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True), + virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']), + virtual_server_ports=dict(type='list', required=True), + ) + ) + + module = AnsibleModule( + argument_spec=argument_spec, + supports_check_mode=False + ) + + host = module.params['host'] + username = module.params['username'] + password = module.params['password'] + state = module.params['state'] + write_config = module.params['write_config'] + slb_virtual = module.params['virtual_server'] + slb_virtual_ip = module.params['virtual_server_ip'] + slb_virtual_status = module.params['virtual_server_status'] + slb_virtual_ports = module.params['virtual_server_ports'] + + if slb_virtual is None: + module.fail_json(msg='virtual_server is required') + + validate_ports(module, slb_virtual_ports) + + axapi_base_url = 
'https://%s/services/rest/V2.1/?format=json' % host + session_url = axapi_authenticate(module, axapi_base_url, username, password) + + slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) + slb_virtual_exists = not axapi_failure(slb_virtual_data) + + changed = False + if state == 'present': + json_post = { + 'virtual_server': { + 'name': slb_virtual, + 'address': slb_virtual_ip, + 'status': axapi_enabled_disabled(slb_virtual_status), + 'vport_list': slb_virtual_ports, + } + } + + # before creating/updating we need to validate that any + # service groups defined in the ports list exist since + # since the API will still create port definitions for + # them while indicating a failure occurred + checked_service_groups = [] + for port in slb_virtual_ports: + if 'service_group' in port and port['service_group'] not in checked_service_groups: + # skip blank service group entries + if port['service_group'] == '': + continue + result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']})) + if axapi_failure(result): + module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group']) + checked_service_groups.append(port['service_group']) + + if not slb_virtual_exists: + result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg']) + changed = True + else: + def needs_update(src_ports, dst_ports): + ''' + Checks to determine if the port definitions of the src_ports + array are in or different from those in dst_ports. If there is + a difference, this function returns true, otherwise false. 
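+
+                For example (illustrative): a port present in src_ports but
+                missing from dst_ports, or one whose status or service_group
+                differs between the two lists, causes a return value of True,
+                and the caller then issues a slb.virtual_server.update call.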
+ ''' + for src_port in src_ports: + found = False + different = False + for dst_port in dst_ports: + if src_port['port'] == dst_port['port']: + found = True + for valid_field in VALID_PORT_FIELDS: + if src_port[valid_field] != dst_port[valid_field]: + different = True + break + if found or different: + break + if not found or different: + return True + # every port from the src exists in the dst, and none of them were different + return False + + defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', []) + + # we check for a needed update both ways, in case ports + # are missing from either the ones specified by the user + # or from those on the device + if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports): + result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post)) + if axapi_failure(result): + module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg']) + changed = True + + # if we changed things, get the full info regarding + # the service group for the return data below + if changed: + result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual})) + else: + result = slb_virtual_data + elif state == 'absent': + if slb_virtual_exists: + result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual})) + changed = True + else: + result = dict(msg="the virtual server was not present") + + # if the config has changed, save the config unless otherwise requested + if changed and write_config: + write_result = axapi_call(module, session_url + '&method=system.action.write_memory') + if axapi_failure(write_result): + module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg']) + + # log out of the session nicely and exit + axapi_call(module, session_url + '&method=session.close') + module.exit_json(changed=changed, content=result) + +# standard ansible module imports +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +from ansible.module_utils.a10 import * + +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_facts b/lib/ansible/modules/extras/net_infrastructure/bigip_facts new file mode 100755 index 00000000000..99a1e31de68 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/bigip_facts @@ -0,0 +1,1670 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
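+
+# Structure note: each fact category below is collected through a thin wrapper
+# class around one iControl namespace (Networking, LocalLB, Management, System);
+# generate_dict() and generate_simple_dict() then flatten the per-object getter
+# results into dictionaries keyed by object name for the returned ansible_facts.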
+ +DOCUMENTATION = ''' +--- +module: bigip_facts +short_description: "Collect facts from F5 BIG-IP devices" +description: + - "Collect facts from F5 BIG-IP devices via iControl SOAP API" +version_added: "1.6" +author: Matt Hite +notes: + - "Requires BIG-IP software version >= 11.4" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Tested with manager and above account privilege level" + +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + choices: [] + aliases: [] + user: + description: + - BIG-IP username + required: true + default: null + choices: [] + aliases: [] + password: + description: + - BIG-IP password + required: true + default: null + choices: [] + aliases: [] + session: + description: + - BIG-IP session support; may be useful to avoid concurrency + issues in certain circumstances. + required: false + default: true + choices: [] + aliases: [] + include: + description: + - Fact category or list of categories to collect + required: true + default: null + choices: ['address_class', 'certificate', 'client_ssl_profile', + 'device_group', 'interface', 'key', 'node', 'pool', 'rule', + 'self_ip', 'software', 'system_info', 'traffic_group', + 'trunk', 'virtual_address', 'virtual_server', 'vlan'] + aliases: [] + filter: + description: + - Shell-style glob matching string used to filter fact keys. Not + applicable for software and system_info fact categories. + required: false + default: null + choices: [] + aliases: [] +''' + +EXAMPLES = ''' + +## playbook task examples: + +--- +# file bigip-test.yml +# ... +- hosts: bigip-test + tasks: + - name: Collect BIG-IP facts + local_action: > + bigip_facts + server=lb.mydomain.com + user=admin + password=mysecret + include=interface,vlan + +''' + +try: + import bigsuds + from suds import MethodNotFound +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +import fnmatch +import traceback +import re + +# =========================================== +# bigip_facts module specific support methods. +# + +class F5(object): + """F5 iControl class. + + F5 BIG-IP iControl API class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, host, user, password, session=False): + self.api = bigsuds.BIGIP(hostname=host, username=user, password=password) + if session: + self.start_session() + + def start_session(self): + self.api = self.api.with_session_id() + + def get_api(self): + return self.api + + def set_recursive_query_state(self, state): + self.api.System.Session.set_recursive_query_state(state) + + def get_recursive_query_state(self): + return self.api.System.Session.get_recursive_query_state() + + def enable_recursive_query_state(self): + self.set_recursive_query_state('STATE_ENABLED') + + def disable_recursive_query_state(self): + self.set_recursive_query_state('STATE_DISABLED') + + def set_active_folder(self, folder): + self.api.System.Session.set_active_folder(folder=folder) + + def get_active_folder(self): + return self.api.System.Session.get_active_folder() + + +class Interfaces(object): + """Interfaces class. + + F5 BIG-IP interfaces class. + + Attributes: + api: iControl API instance. + interfaces: A list of BIG-IP interface names. 
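+
+    Example (sketch, as used by generate_interface_dict below):
+        interfaces = Interfaces(f5.get_api(), regex)
+        names = interfaces.get_list()
+        macs = interfaces.get_mac_address()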
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.interfaces = api.Networking.Interfaces.get_list() + if regex: + re_filter = re.compile(regex) + self.interfaces = filter(re_filter.search, self.interfaces) + + def get_list(self): + return self.interfaces + + def get_active_media(self): + return self.api.Networking.Interfaces.get_active_media(self.interfaces) + + def get_actual_flow_control(self): + return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces) + + def get_bundle_state(self): + return self.api.Networking.Interfaces.get_bundle_state(self.interfaces) + + def get_description(self): + return self.api.Networking.Interfaces.get_description(self.interfaces) + + def get_dual_media_state(self): + return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces) + + def get_enabled_state(self): + return self.api.Networking.Interfaces.get_enabled_state(self.interfaces) + + def get_if_index(self): + return self.api.Networking.Interfaces.get_if_index(self.interfaces) + + def get_learning_mode(self): + return self.api.Networking.Interfaces.get_learning_mode(self.interfaces) + + def get_lldp_admin_status(self): + return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces) + + def get_lldp_tlvmap(self): + return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces) + + def get_mac_address(self): + return self.api.Networking.Interfaces.get_mac_address(self.interfaces) + + def get_media(self): + return self.api.Networking.Interfaces.get_media(self.interfaces) + + def get_media_option(self): + return self.api.Networking.Interfaces.get_media_option(self.interfaces) + + def get_media_option_sfp(self): + return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces) + + def get_media_sfp(self): + return self.api.Networking.Interfaces.get_media_sfp(self.interfaces) + + def get_media_speed(self): + return self.api.Networking.Interfaces.get_media_speed(self.interfaces) + + def get_media_status(self): + return self.api.Networking.Interfaces.get_media_status(self.interfaces) + + def get_mtu(self): + return self.api.Networking.Interfaces.get_mtu(self.interfaces) + + def get_phy_master_slave_mode(self): + return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces) + + def get_prefer_sfp_state(self): + return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces) + + def get_flow_control(self): + return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces) + + def get_sflow_poll_interval(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces) + + def get_sfp_media_state(self): + return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces) + + def get_stp_active_edge_port_state(self): + return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces) + + def get_stp_enabled_state(self): + return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces) + + def get_stp_link_type(self): + return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces) + + +class SelfIPs(object): + """Self IPs class. + + F5 BIG-IP Self IPs class. + + Attributes: + api: iControl API instance. 
+ self_ips: List of self IPs. + """ + + def __init__(self, api, regex=None): + self.api = api + self.self_ips = api.Networking.SelfIPV2.get_list() + if regex: + re_filter = re.compile(regex) + self.self_ips = filter(re_filter.search, self.self_ips) + + def get_list(self): + return self.self_ips + + def get_address(self): + return self.api.Networking.SelfIPV2.get_address(self.self_ips) + + def get_allow_access_list(self): + return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips) + + def get_description(self): + return self.api.Networking.SelfIPV2.get_description(self.self_ips) + + def get_enforced_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips) + + def get_floating_state(self): + return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips) + + def get_fw_rule(self): + return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips) + + def get_netmask(self): + return self.api.Networking.SelfIPV2.get_netmask(self.self_ips) + + def get_staged_firewall_policy(self): + return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips) + + def get_traffic_group(self): + return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips) + + def get_vlan(self): + return self.api.Networking.SelfIPV2.get_vlan(self.self_ips) + + def get_is_traffic_group_inherited(self): + return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips) + + +class Trunks(object): + """Trunks class. + + F5 BIG-IP trunks class. + + Attributes: + api: iControl API instance. + trunks: List of trunks. + """ + + def __init__(self, api, regex=None): + self.api = api + self.trunks = api.Networking.Trunk.get_list() + if regex: + re_filter = re.compile(regex) + self.trunks = filter(re_filter.search, self.trunks) + + def get_list(self): + return self.trunks + + def get_active_lacp_state(self): + return self.api.Networking.Trunk.get_active_lacp_state(self.trunks) + + def get_configured_member_count(self): + return self.api.Networking.Trunk.get_configured_member_count(self.trunks) + + def get_description(self): + return self.api.Networking.Trunk.get_description(self.trunks) + + def get_distribution_hash_option(self): + return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks) + + def get_interface(self): + return self.api.Networking.Trunk.get_interface(self.trunks) + + def get_lacp_enabled_state(self): + return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks) + + def get_lacp_timeout_option(self): + return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks) + + def get_link_selection_policy(self): + return self.api.Networking.Trunk.get_link_selection_policy(self.trunks) + + def get_media_speed(self): + return self.api.Networking.Trunk.get_media_speed(self.trunks) + + def get_media_status(self): + return self.api.Networking.Trunk.get_media_status(self.trunks) + + def get_operational_member_count(self): + return self.api.Networking.Trunk.get_operational_member_count(self.trunks) + + def get_stp_enabled_state(self): + return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks) + + def get_stp_protocol_detection_reset_state(self): + return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks) + + +class Vlans(object): + """Vlans class. + + F5 BIG-IP Vlans class. + + Attributes: + api: iControl API instance. + vlans: List of VLANs. 
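+
+    Example (sketch, as in generate_vlan_dict below):
+        vlans = Vlans(f5.get_api(), regex)
+        vlan_ids = vlans.get_vlan_id()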
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.vlans = api.Networking.VLAN.get_list() + if regex: + re_filter = re.compile(regex) + self.vlans = filter(re_filter.search, self.vlans) + + def get_list(self): + return self.vlans + + def get_auto_lasthop(self): + return self.api.Networking.VLAN.get_auto_lasthop(self.vlans) + + def get_cmp_hash_algorithm(self): + return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans) + + def get_description(self): + return self.api.Networking.VLAN.get_description(self.vlans) + + def get_dynamic_forwarding(self): + return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans) + + def get_failsafe_action(self): + return self.api.Networking.VLAN.get_failsafe_action(self.vlans) + + def get_failsafe_state(self): + return self.api.Networking.VLAN.get_failsafe_state(self.vlans) + + def get_failsafe_timeout(self): + return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans) + + def get_if_index(self): + return self.api.Networking.VLAN.get_if_index(self.vlans) + + def get_learning_mode(self): + return self.api.Networking.VLAN.get_learning_mode(self.vlans) + + def get_mac_masquerade_address(self): + return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans) + + def get_member(self): + return self.api.Networking.VLAN.get_member(self.vlans) + + def get_mtu(self): + return self.api.Networking.VLAN.get_mtu(self.vlans) + + def get_sflow_poll_interval(self): + return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans) + + def get_sflow_poll_interval_global(self): + return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans) + + def get_sflow_sampling_rate(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans) + + def get_sflow_sampling_rate_global(self): + return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans) + + def get_source_check_state(self): + return self.api.Networking.VLAN.get_source_check_state(self.vlans) + + def get_true_mac_address(self): + return self.api.Networking.VLAN.get_true_mac_address(self.vlans) + + def get_vlan_id(self): + return self.api.Networking.VLAN.get_vlan_id(self.vlans) + + +class Software(object): + """Software class. + + F5 BIG-IP software class. + + Attributes: + api: iControl API instance. + """ + + def __init__(self, api): + self.api = api + + def get_all_software_status(self): + return self.api.System.SoftwareManagement.get_all_software_status() + + +class VirtualServers(object): + """Virtual servers class. + + F5 BIG-IP virtual servers class. + + Attributes: + api: iControl API instance. + virtual_servers: List of virtual servers. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_servers = api.LocalLB.VirtualServer.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_servers = filter(re_filter.search, self.virtual_servers) + + def get_list(self): + return self.virtual_servers + + def get_actual_hardware_acceleration(self): + return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers) + + def get_authentication_profile(self): + return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers) + + def get_auto_lasthop(self): + return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers) + + def get_bw_controller_policy(self): + return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers) + + def get_clone_pool(self): + return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers) + + def get_cmp_enable_mode(self): + return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers) + + def get_connection_mirror_state(self): + return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers) + + def get_default_pool_name(self): + return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers) + + def get_description(self): + return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers) + + def get_destination(self): + return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers) + + def get_enforced_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers) + + def get_fallback_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers) + + def get_fw_rule(self): + return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers) + + def get_gtm_score(self): + return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers) + + def get_last_hop_pool(self): + return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers) + + def get_nat64_state(self): + return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers) + + def get_object_status(self): + return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers) + + def get_persistence_profile(self): + return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers) + + def get_profile(self): + return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers) + + def get_protocol(self): + return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers) + + def get_rate_class(self): + return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers) + + def get_rate_limit(self): + return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers) + + def get_rate_limit_destination_mask(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers) + + def get_rate_limit_mode(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers) + + def get_rate_limit_source_mask(self): + return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers) + + def get_related_rule(self): + return 
self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers) + + def get_rule(self): + return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers) + + def get_security_log_profile(self): + return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers) + + def get_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers) + + def get_snat_type(self): + return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers) + + def get_source_address(self): + return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers) + + def get_source_address_translation_lsn_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers) + + def get_source_address_translation_snat_pool(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers) + + def get_source_address_translation_type(self): + return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers) + + def get_source_port_behavior(self): + return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers) + + def get_staged_firewall_policy(self): + return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers) + + def get_translate_address_state(self): + return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers) + + def get_translate_port_state(self): + return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers) + + def get_type(self): + return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers) + + def get_vlan(self): + return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers) + + def get_wildmask(self): + return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers) + + +class Pools(object): + """Pools class. + + F5 BIG-IP pools class. + + Attributes: + api: iControl API instance. + pool_names: List of pool names. 
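+
+    Example (sketch, as in generate_pool_dict below):
+        pools = Pools(f5.get_api(), regex)
+        lb_methods = pools.get_lb_method()
+        members = pools.get_member()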
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.pool_names = api.LocalLB.Pool.get_list() + if regex: + re_filter = re.compile(regex) + self.pool_names = filter(re_filter.search, self.pool_names) + + def get_list(self): + return self.pool_names + + def get_action_on_service_down(self): + return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names) + + def get_active_member_count(self): + return self.api.LocalLB.Pool.get_active_member_count(self.pool_names) + + def get_aggregate_dynamic_ratio(self): + return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names) + + def get_allow_nat_state(self): + return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names) + + def get_allow_snat_state(self): + return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names) + + def get_client_ip_tos(self): + return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names) + + def get_client_link_qos(self): + return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names) + + def get_description(self): + return self.api.LocalLB.Pool.get_description(self.pool_names) + + def get_gateway_failsafe_device(self): + return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names) + + def get_ignore_persisted_weight_state(self): + return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names) + + def get_lb_method(self): + return self.api.LocalLB.Pool.get_lb_method(self.pool_names) + + def get_member(self): + return self.api.LocalLB.Pool.get_member_v2(self.pool_names) + + def get_minimum_active_member(self): + return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names) + + def get_minimum_up_member(self): + return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names) + + def get_minimum_up_member_action(self): + return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names) + + def get_minimum_up_member_enabled_state(self): + return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names) + + def get_monitor_association(self): + return self.api.LocalLB.Pool.get_monitor_association(self.pool_names) + + def get_monitor_instance(self): + return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names) + + def get_object_status(self): + return self.api.LocalLB.Pool.get_object_status(self.pool_names) + + def get_profile(self): + return self.api.LocalLB.Pool.get_profile(self.pool_names) + + def get_queue_depth_limit(self): + return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names) + + def get_queue_on_connection_limit_state(self): + return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names) + + def get_queue_time_limit(self): + return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names) + + def get_reselect_tries(self): + return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names) + + def get_server_ip_tos(self): + return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names) + + def get_server_link_qos(self): + return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names) + + def get_simple_timeout(self): + return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names) + + def get_slow_ramp_time(self): + return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names) + + +class Devices(object): + """Devices class. + + F5 BIG-IP devices class. + + Attributes: + api: iControl API instance. + devices: List of devices. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.devices = api.Management.Device.get_list() + if regex: + re_filter = re.compile(regex) + self.devices = filter(re_filter.search, self.devices) + + def get_list(self): + return self.devices + + def get_active_modules(self): + return self.api.Management.Device.get_active_modules(self.devices) + + def get_base_mac_address(self): + return self.api.Management.Device.get_base_mac_address(self.devices) + + def get_blade_addresses(self): + return self.api.Management.Device.get_blade_addresses(self.devices) + + def get_build(self): + return self.api.Management.Device.get_build(self.devices) + + def get_chassis_id(self): + return self.api.Management.Device.get_chassis_id(self.devices) + + def get_chassis_type(self): + return self.api.Management.Device.get_chassis_type(self.devices) + + def get_comment(self): + return self.api.Management.Device.get_comment(self.devices) + + def get_configsync_address(self): + return self.api.Management.Device.get_configsync_address(self.devices) + + def get_contact(self): + return self.api.Management.Device.get_contact(self.devices) + + def get_description(self): + return self.api.Management.Device.get_description(self.devices) + + def get_edition(self): + return self.api.Management.Device.get_edition(self.devices) + + def get_failover_state(self): + return self.api.Management.Device.get_failover_state(self.devices) + + def get_local_device(self): + return self.api.Management.Device.get_local_device() + + def get_hostname(self): + return self.api.Management.Device.get_hostname(self.devices) + + def get_inactive_modules(self): + return self.api.Management.Device.get_inactive_modules(self.devices) + + def get_location(self): + return self.api.Management.Device.get_location(self.devices) + + def get_management_address(self): + return self.api.Management.Device.get_management_address(self.devices) + + def get_marketing_name(self): + return self.api.Management.Device.get_marketing_name(self.devices) + + def get_multicast_address(self): + return self.api.Management.Device.get_multicast_address(self.devices) + + def get_optional_modules(self): + return self.api.Management.Device.get_optional_modules(self.devices) + + def get_platform_id(self): + return self.api.Management.Device.get_platform_id(self.devices) + + def get_primary_mirror_address(self): + return self.api.Management.Device.get_primary_mirror_address(self.devices) + + def get_product(self): + return self.api.Management.Device.get_product(self.devices) + + def get_secondary_mirror_address(self): + return self.api.Management.Device.get_secondary_mirror_address(self.devices) + + def get_software_version(self): + return self.api.Management.Device.get_software_version(self.devices) + + def get_timelimited_modules(self): + return self.api.Management.Device.get_timelimited_modules(self.devices) + + def get_timezone(self): + return self.api.Management.Device.get_timezone(self.devices) + + def get_unicast_addresses(self): + return self.api.Management.Device.get_unicast_addresses(self.devices) + + +class DeviceGroups(object): + """Device groups class. + + F5 BIG-IP device groups class. + + Attributes: + api: iControl API instance. + device_groups: List of device groups. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.device_groups = api.Management.DeviceGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.device_groups = filter(re_filter.search, self.device_groups) + + def get_list(self): + return self.device_groups + + def get_all_preferred_active(self): + return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups) + + def get_autosync_enabled_state(self): + return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups) + + def get_description(self): + return self.api.Management.DeviceGroup.get_description(self.device_groups) + + def get_device(self): + return self.api.Management.DeviceGroup.get_device(self.device_groups) + + def get_full_load_on_sync_state(self): + return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups) + + def get_incremental_config_sync_size_maximum(self): + return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups) + + def get_network_failover_enabled_state(self): + return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups) + + def get_sync_status(self): + return self.api.Management.DeviceGroup.get_sync_status(self.device_groups) + + def get_type(self): + return self.api.Management.DeviceGroup.get_type(self.device_groups) + + +class TrafficGroups(object): + """Traffic groups class. + + F5 BIG-IP traffic groups class. + + Attributes: + api: iControl API instance. + traffic_groups: List of traffic groups. + """ + + def __init__(self, api, regex=None): + self.api = api + self.traffic_groups = api.Management.TrafficGroup.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.traffic_groups) + + def get_list(self): + return self.traffic_groups + + def get_auto_failback_enabled_state(self): + return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups) + + def get_auto_failback_time(self): + return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups) + + def get_default_device(self): + return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups) + + def get_description(self): + return self.api.Management.TrafficGroup.get_description(self.traffic_groups) + + def get_ha_load_factor(self): + return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups) + + def get_ha_order(self): + return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups) + + def get_is_floating(self): + return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups) + + def get_mac_masquerade_address(self): + return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups) + + def get_unit_id(self): + return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups) + + +class Rules(object): + """Rules class. + + F5 BIG-IP iRules class. + + Attributes: + api: iControl API instance. + rules: List of iRules. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.rules = api.LocalLB.Rule.get_list() + if regex: + re_filter = re.compile(regex) + self.traffic_groups = filter(re_filter.search, self.rules) + + def get_list(self): + return self.rules + + def get_description(self): + return self.api.LocalLB.Rule.get_description(rule_names=self.rules) + + def get_ignore_vertification(self): + return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules) + + def get_verification_status(self): + return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules) + + def get_definition(self): + return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)] + +class Nodes(object): + """Nodes class. + + F5 BIG-IP nodes class. + + Attributes: + api: iControl API instance. + nodes: List of nodes. + """ + + def __init__(self, api, regex=None): + self.api = api + self.nodes = api.LocalLB.NodeAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.nodes = filter(re_filter.search, self.nodes) + + def get_list(self): + return self.nodes + + def get_address(self): + return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes) + + def get_connection_limit(self): + return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes) + + def get_description(self): + return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes) + + def get_dynamic_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes) + + def get_monitor_instance(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes) + + def get_monitor_rule(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes) + + def get_monitor_status(self): + return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes) + + def get_object_status(self): + return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes) + + def get_rate_limit(self): + return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes) + + def get_ratio(self): + return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes) + + def get_session_status(self): + return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes) + + +class VirtualAddresses(object): + """Virtual addresses class. + + F5 BIG-IP virtual addresses class. + + Attributes: + api: iControl API instance. + virtual_addresses: List of virtual addresses. 
+ """ + + def __init__(self, api, regex=None): + self.api = api + self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list() + if regex: + re_filter = re.compile(regex) + self.virtual_addresses = filter(re_filter.search, self.virtual_addresses) + + def get_list(self): + return self.virtual_addresses + + def get_address(self): + return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses) + + def get_arp_state(self): + return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses) + + def get_auto_delete_state(self): + return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses) + + def get_connection_limit(self): + return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses) + + def get_description(self): + return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses) + + def get_enabled_state(self): + return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses) + + def get_icmp_echo_state(self): + return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses) + + def get_is_floating_state(self): + return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses) + + def get_netmask(self): + return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses) + + def get_object_status(self): + return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses) + + def get_route_advertisement_state(self): + return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses) + + def get_traffic_group(self): + return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses) + + +class AddressClasses(object): + """Address group/class class. + + F5 BIG-IP address group/class class. + + Attributes: + api: iControl API instance. + address_classes: List of address classes. + """ + + def __init__(self, api, regex=None): + self.api = api + self.address_classes = api.LocalLB.Class.get_address_class_list() + if regex: + re_filter = re.compile(regex) + self.address_classes = filter(re_filter.search, self.address_classes) + + def get_list(self): + return self.address_classes + + def get_address_class(self): + key = self.api.LocalLB.Class.get_address_class(self.address_classes) + value = self.api.LocalLB.Class.get_address_class_member_data_value(key) + result = map(zip, [x['members'] for x in key], value) + return result + + def get_description(self): + return self.api.LocalLB.Class.get_description(self.address_classes) + + +class Certificates(object): + """Certificates class. + + F5 BIG-IP certificates class. + + Attributes: + api: iControl API instance. + certificates: List of certificate identifiers. + certificate_list: List of certificate information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode) + self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list] + if regex: + re_filter = re.compile(regex) + self.certificates = filter(re_filter.search, self.certificates) + self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates] + + def get_list(self): + return self.certificates + + def get_certificate_list(self): + return self.certificate_list + + +class Keys(object): + """Keys class. + + F5 BIG-IP keys class. 
+ + Attributes: + api: iControl API instance. + keys: List of key identifiers. + key_list: List of key information structures. + """ + + def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"): + self.api = api + self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode) + self.keys = [x['key_info']['id'] for x in self.key_list] + if regex: + re_filter = re.compile(regex) + self.keys = filter(re_filter.search, self.keys) + self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys] + + def get_list(self): + return self.keys + + def get_key_list(self): + return self.key_list + + +class ProfileClientSSL(object): + """Client SSL profiles class. + + F5 BIG-IP client SSL profiles class. + + Attributes: + api: iControl API instance. + profiles: List of client SSL profiles. + """ + + def __init__(self, api, regex=None): + self.api = api + self.profiles = api.LocalLB.ProfileClientSSL.get_list() + if regex: + re_filter = re.compile(regex) + self.profiles = filter(re_filter.search, self.profiles) + + def get_list(self): + return self.profiles + + def get_alert_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles) + + def get_allow_nonssl_state(self): + return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles) + + def get_authenticate_depth(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles) + + def get_authenticate_once_state(self): + return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles) + + def get_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles) + + def get_cache_size(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles) + + def get_cache_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles) + + def get_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles) + + def get_chain_file(self): + return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles) + + def get_cipher_list(self): + return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles) + + def get_client_certificate_ca_file(self): + return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles) + + def get_crl_file(self): + return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles) + + def get_default_profile(self): + return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles) + + def get_description(self): + return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles) + + def get_forward_proxy_ca_certificate_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles) + + def get_forward_proxy_ca_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles) + + def get_forward_proxy_ca_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles) + + def get_forward_proxy_certificate_extension_include(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles) + + def get_forward_proxy_certificate_lifespan(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles) + + def get_forward_proxy_enabled_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles) + + def 
get_forward_proxy_lookup_by_ipaddr_port_state(self): + return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles) + + def get_handshake_timeout(self): + return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles) + + def get_key_file(self): + return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles) + + def get_modssl_emulation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles) + + def get_passphrase(self): + return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles) + + def get_peer_certification_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles) + + def get_profile_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles) + + def get_renegotiation_maximum_record_delay(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles) + + def get_renegotiation_period(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles) + + def get_renegotiation_state(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles) + + def get_renegotiation_throughput(self): + return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles) + + def get_retain_certificate_state(self): + return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles) + + def get_secure_renegotiation_mode(self): + return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles) + + def get_server_name(self): + return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles) + + def get_session_ticket_state(self): + return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles) + + def get_sni_default_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles) + + def get_sni_require_state(self): + return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles) + + def get_ssl_option(self): + return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles) + + def get_strict_resume_state(self): + return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles) + + def get_unclean_shutdown_state(self): + return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles) + + def get_is_base_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles) + + def get_is_system_profile(self): + return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles) + + +class SystemInfo(object): + """System information class. + + F5 BIG-IP system information class. + + Attributes: + api: iControl API instance. 
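+
+    Example (sketch, as in generate_system_info_dict below):
+        system_info = SystemInfo(f5.get_api())
+        info = system_info.get_system_information()
+        uptime = system_info.get_uptime()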
+ """ + + def __init__(self, api): + self.api = api + + def get_base_mac_address(self): + return self.api.System.SystemInfo.get_base_mac_address() + + def get_blade_temperature(self): + return self.api.System.SystemInfo.get_blade_temperature() + + def get_chassis_slot_information(self): + return self.api.System.SystemInfo.get_chassis_slot_information() + + def get_globally_unique_identifier(self): + return self.api.System.SystemInfo.get_globally_unique_identifier() + + def get_group_id(self): + return self.api.System.SystemInfo.get_group_id() + + def get_hardware_information(self): + return self.api.System.SystemInfo.get_hardware_information() + + def get_marketing_name(self): + return self.api.System.SystemInfo.get_marketing_name() + + def get_product_information(self): + return self.api.System.SystemInfo.get_product_information() + + def get_pva_version(self): + return self.api.System.SystemInfo.get_pva_version() + + def get_system_id(self): + return self.api.System.SystemInfo.get_system_id() + + def get_system_information(self): + return self.api.System.SystemInfo.get_system_information() + + def get_time(self): + return self.api.System.SystemInfo.get_time() + + def get_time_zone(self): + return self.api.System.SystemInfo.get_time_zone() + + def get_uptime(self): + return self.api.System.SystemInfo.get_uptime() + + +def generate_dict(api_obj, fields): + result_dict = {} + lists = [] + supported_fields = [] + if api_obj.get_list(): + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except MethodNotFound: + pass + else: + lists.append(api_response) + supported_fields.append(field) + for i, j in enumerate(api_obj.get_list()): + temp = {} + temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)]) + result_dict[j] = temp + return result_dict + +def generate_simple_dict(api_obj, fields): + result_dict = {} + for field in fields: + try: + api_response = getattr(api_obj, "get_" + field)() + except MethodNotFound: + pass + else: + result_dict[field] = api_response + return result_dict + +def generate_interface_dict(f5, regex): + interfaces = Interfaces(f5.get_api(), regex) + fields = ['active_media', 'actual_flow_control', 'bundle_state', + 'description', 'dual_media_state', 'enabled_state', 'if_index', + 'learning_mode', 'lldp_admin_status', 'lldp_tlvmap', + 'mac_address', 'media', 'media_option', 'media_option_sfp', + 'media_sfp', 'media_speed', 'media_status', 'mtu', + 'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sfp_media_state', 'stp_active_edge_port_state', + 'stp_enabled_state', 'stp_link_type', + 'stp_protocol_detection_reset_state'] + return generate_dict(interfaces, fields) + +def generate_self_ip_dict(f5, regex): + self_ips = SelfIPs(f5.get_api(), regex) + fields = ['address', 'allow_access_list', 'description', + 'enforced_firewall_policy', 'floating_state', 'fw_rule', + 'netmask', 'staged_firewall_policy', 'traffic_group', + 'vlan', 'is_traffic_group_inherited'] + return generate_dict(self_ips, fields) + +def generate_trunk_dict(f5, regex): + trunks = Trunks(f5.get_api(), regex) + fields = ['active_lacp_state', 'configured_member_count', 'description', + 'distribution_hash_option', 'interface', 'lacp_enabled_state', + 'lacp_timeout_option', 'link_selection_policy', 'media_speed', + 'media_status', 'operational_member_count', 'stp_enabled_state', + 'stp_protocol_detection_reset_state'] + return generate_dict(trunks, fields) + +def generate_vlan_dict(f5, 
regex): + vlans = Vlans(f5.get_api(), regex) + fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description', + 'dynamic_forwarding', 'failsafe_action', 'failsafe_state', + 'failsafe_timeout', 'if_index', 'learning_mode', + 'mac_masquerade_address', 'member', 'mtu', + 'sflow_poll_interval', 'sflow_poll_interval_global', + 'sflow_sampling_rate', 'sflow_sampling_rate_global', + 'source_check_state', 'true_mac_address', 'vlan_id'] + return generate_dict(vlans, fields) + +def generate_vs_dict(f5, regex): + virtual_servers = VirtualServers(f5.get_api(), regex) + fields = ['actual_hardware_acceleration', 'authentication_profile', + 'auto_lasthop', 'bw_controller_policy', 'clone_pool', + 'cmp_enable_mode', 'connection_limit', 'connection_mirror_state', + 'default_pool_name', 'description', 'destination', + 'enabled_state', 'enforced_firewall_policy', + 'fallback_persistence_profile', 'fw_rule', 'gtm_score', + 'last_hop_pool', 'nat64_state', 'object_status', + 'persistence_profile', 'profile', 'protocol', + 'rate_class', 'rate_limit', 'rate_limit_destination_mask', + 'rate_limit_mode', 'rate_limit_source_mask', 'related_rule', + 'rule', 'security_log_profile', 'snat_pool', 'snat_type', + 'source_address', 'source_address_translation_lsn_pool', + 'source_address_translation_snat_pool', + 'source_address_translation_type', 'source_port_behavior', + 'staged_firewall_policy', 'translate_address_state', + 'translate_port_state', 'type', 'vlan', 'wildmask'] + return generate_dict(virtual_servers, fields) + +def generate_pool_dict(f5, regex): + pools = Pools(f5.get_api(), regex) + fields = ['action_on_service_down', 'active_member_count', + 'aggregate_dynamic_ratio', 'allow_nat_state', + 'allow_snat_state', 'client_ip_tos', 'client_link_qos', + 'description', 'gateway_failsafe_device', + 'ignore_persisted_weight_state', 'lb_method', 'member', + 'minimum_active_member', 'minimum_up_member', + 'minimum_up_member_action', 'minimum_up_member_enabled_state', + 'monitor_association', 'monitor_instance', 'object_status', + 'profile', 'queue_depth_limit', + 'queue_on_connection_limit_state', 'queue_time_limit', + 'reselect_tries', 'server_ip_tos', 'server_link_qos', + 'simple_timeout', 'slow_ramp_time'] + return generate_dict(pools, fields) + +def generate_device_dict(f5, regex): + devices = Devices(f5.get_api(), regex) + fields = ['active_modules', 'base_mac_address', 'blade_addresses', + 'build', 'chassis_id', 'chassis_type', 'comment', + 'configsync_address', 'contact', 'description', 'edition', + 'failover_state', 'hostname', 'inactive_modules', 'location', + 'management_address', 'marketing_name', 'multicast_address', + 'optional_modules', 'platform_id', 'primary_mirror_address', + 'product', 'secondary_mirror_address', 'software_version', + 'timelimited_modules', 'timezone', 'unicast_addresses'] + return generate_dict(devices, fields) + +def generate_device_group_dict(f5, regex): + device_groups = DeviceGroups(f5.get_api(), regex) + fields = ['all_preferred_active', 'autosync_enabled_state','description', + 'device', 'full_load_on_sync_state', + 'incremental_config_sync_size_maximum', + 'network_failover_enabled_state', 'sync_status', 'type'] + return generate_dict(device_groups, fields) + +def generate_traffic_group_dict(f5, regex): + traffic_groups = TrafficGroups(f5.get_api(), regex) + fields = ['auto_failback_enabled_state', 'auto_failback_time', + 'default_device', 'description', 'ha_load_factor', + 'ha_order', 'is_floating', 'mac_masquerade_address', + 'unit_id'] + return 
generate_dict(traffic_groups, fields) + +def generate_rule_dict(f5, regex): + rules = Rules(f5.get_api(), regex) + fields = ['definition', 'description', 'ignore_vertification', + 'verification_status'] + return generate_dict(rules, fields) + +def generate_node_dict(f5, regex): + nodes = Nodes(f5.get_api(), regex) + fields = ['address', 'connection_limit', 'description', 'dynamic_ratio', + 'monitor_instance', 'monitor_rule', 'monitor_status', + 'object_status', 'rate_limit', 'ratio', 'session_status'] + return generate_dict(nodes, fields) + +def generate_virtual_address_dict(f5, regex): + virtual_addresses = VirtualAddresses(f5.get_api(), regex) + fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit', + 'description', 'enabled_state', 'icmp_echo_state', + 'is_floating_state', 'netmask', 'object_status', + 'route_advertisement_state', 'traffic_group'] + return generate_dict(virtual_addresses, fields) + +def generate_address_class_dict(f5, regex): + address_classes = AddressClasses(f5.get_api(), regex) + fields = ['address_class', 'description'] + return generate_dict(address_classes, fields) + +def generate_certificate_dict(f5, regex): + certificates = Certificates(f5.get_api(), regex) + return dict(zip(certificates.get_list(), certificates.get_certificate_list())) + +def generate_key_dict(f5, regex): + keys = Keys(f5.get_api(), regex) + return dict(zip(keys.get_list(), keys.get_key_list())) + +def generate_client_ssl_profile_dict(f5, regex): + profiles = ProfileClientSSL(f5.get_api(), regex) + fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth', + 'authenticate_once_state', 'ca_file', 'cache_size', + 'cache_timeout', 'certificate_file', 'chain_file', + 'cipher_list', 'client_certificate_ca_file', 'crl_file', + 'default_profile', 'description', + 'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file', + 'forward_proxy_ca_passphrase', + 'forward_proxy_certificate_extension_include', + 'forward_proxy_certificate_lifespan', + 'forward_proxy_enabled_state', + 'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout', + 'key_file', 'modssl_emulation_state', 'passphrase', + 'peer_certification_mode', 'profile_mode', + 'renegotiation_maximum_record_delay', 'renegotiation_period', + 'renegotiation_state', 'renegotiation_throughput', + 'retain_certificate_state', 'secure_renegotiation_mode', + 'server_name', 'session_ticket_state', 'sni_default_state', + 'sni_require_state', 'ssl_option', 'strict_resume_state', + 'unclean_shutdown_state', 'is_base_profile', 'is_system_profile'] + return generate_dict(profiles, fields) + +def generate_system_info_dict(f5): + system_info = SystemInfo(f5.get_api()) + fields = ['base_mac_address', + 'blade_temperature', 'chassis_slot_information', + 'globally_unique_identifier', 'group_id', + 'hardware_information', + 'marketing_name', + 'product_information', 'pva_version', 'system_id', + 'system_information', 'time', + 'time_zone', 'uptime'] + return generate_simple_dict(system_info, fields) + +def generate_software_list(f5): + software = Software(f5.get_api()) + software_list = software.get_all_software_status() + return software_list + + +def main(): + module = AnsibleModule( + argument_spec = dict( + server = dict(type='str', required=True), + user = dict(type='str', required=True), + password = dict(type='str', required=True), + session = dict(type='bool', default=False), + include = dict(type='list', required=True), + filter = dict(type='str', required=False), + ) + ) + + if not bigsuds_found: + 
module.fail_json(msg="the python suds and bigsuds modules is required") + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + session = module.params['session'] + fact_filter = module.params['filter'] + if fact_filter: + regex = fnmatch.translate(fact_filter) + else: + regex = None + include = map(lambda x: x.lower(), module.params['include']) + valid_includes = ('address_class', 'certificate', 'client_ssl_profile', + 'device_group', 'interface', 'key', 'node', 'pool', + 'rule', 'self_ip', 'software', 'system_info', + 'traffic_group', 'trunk', 'virtual_address', + 'virtual_server', 'vlan') + include_test = map(lambda x: x in valid_includes, include) + if not all(include_test): + module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include))) + + try: + facts = {} + + if len(include) > 0: + f5 = F5(server, user, password, session) + saved_active_folder = f5.get_active_folder() + saved_recursive_query_state = f5.get_recursive_query_state() + if saved_active_folder != "/": + f5.set_active_folder("/") + if saved_recursive_query_state != "STATE_ENABLED": + f5.enable_recursive_query_state() + + if 'interface' in include: + facts['interface'] = generate_interface_dict(f5, regex) + if 'self_ip' in include: + facts['self_ip'] = generate_self_ip_dict(f5, regex) + if 'trunk' in include: + facts['trunk'] = generate_trunk_dict(f5, regex) + if 'vlan' in include: + facts['vlan'] = generate_vlan_dict(f5, regex) + if 'virtual_server' in include: + facts['virtual_server'] = generate_vs_dict(f5, regex) + if 'pool' in include: + facts['pool'] = generate_pool_dict(f5, regex) + if 'device' in include: + facts['device'] = generate_device_dict(f5, regex) + if 'device_group' in include: + facts['device_group'] = generate_device_group_dict(f5, regex) + if 'traffic_group' in include: + facts['traffic_group'] = generate_traffic_group_dict(f5, regex) + if 'rule' in include: + facts['rule'] = generate_rule_dict(f5, regex) + if 'node' in include: + facts['node'] = generate_node_dict(f5, regex) + if 'virtual_address' in include: + facts['virtual_address'] = generate_virtual_address_dict(f5, regex) + if 'address_class' in include: + facts['address_class'] = generate_address_class_dict(f5, regex) + if 'software' in include: + facts['software'] = generate_software_list(f5) + if 'certificate' in include: + facts['certificate'] = generate_certificate_dict(f5, regex) + if 'key' in include: + facts['key'] = generate_key_dict(f5, regex) + if 'client_ssl_profile' in include: + facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex) + if 'system_info' in include: + facts['system_info'] = generate_system_info_dict(f5) + + # restore saved state + if saved_active_folder and saved_active_folder != "/": + f5.set_active_folder(saved_active_folder) + if saved_recursive_query_state and \ + saved_recursive_query_state != "STATE_ENABLED": + f5.set_recursive_query_state(saved_recursive_query_state) + + result = {'ansible_facts': facts} + + except Exception, e: + module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc())) + + module.exit_json(**result) + +# include magic from lib/ansible/module_common.py +#<> +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_http b/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_http new file mode 100644 index 00000000000..62823f86579 --- /dev/null +++ 
b/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_http @@ -0,0 +1,464 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, serge van Ginderachter +# based on Matt Hite's bigip_pool module +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: bigip_monitor_http +short_description: "Manages F5 BIG-IP LTM http monitors" +description: + - "Manages F5 BIG-IP LTM monitors via iControl SOAP API" +version_added: "1.4" +author: Serge van Ginderachter +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + user: + description: + - BIG-IP username + required: true + default: null + password: + description: + - BIG-IP password + required: true + default: null + state: + description: + - Monitor state + required: false + default: 'present' + choices: ['present', 'absent'] + name: + description: + - Monitor name + required: true + default: null + aliases: ['monitor'] + partition: + description: + - Partition for the monitor + required: false + default: 'Common' + parent: + description: + - The parent template of this monitor template + required: false + default: 'http' + parent_partition: + description: + - Partition for the parent monitor + required: false + default: 'Common' + send: + description: + - The send string for the monitor call + required: true + default: none + receive: + description: + - The receive string for the monitor call + required: true + default: none + receive_disable: + description: + - The receive disable string for the monitor call + required: true + default: none + ip: + description: + - IP address part of the ipport definition. The default API setting + is "0.0.0.0". + required: false + default: none + port: + description: + - port address part op the ipport definition. The default API + setting is 0. + required: false + default: none + interval: + description: + - The interval specifying how frequently the monitor instance + of this template will run. By default, this interval is used for up and + down states. The default API setting is 5. + required: false + default: none + timeout: + description: + - The number of seconds in which the node or service must respond to + the monitor request. If the target responds within the set time + period, it is considered up. If the target does not respond within + the set time period, it is considered down. You can change this + number to any number you want, however, it should be 3 times the + interval number of seconds plus 1 second. The default API setting + is 16. 
+ required: false + default: none + time_until_up: + description: + - Specifies the amount of time in seconds after the first successful + response before a node will be marked up. A value of 0 will cause a + node to be marked up immediately after a valid response is received + from the node. The default API setting is 0. + required: false + default: none +''' + +EXAMPLES = ''' +- name: BIGIP F5 | Create HTTP Monitor + local_action: + module: bigip_monitor_http + state: present + server: "{{ f5server }}" + user: "{{ f5user }}" + password: "{{ f5password }}" + name: "{{ item.monitorname }}" + send: "{{ item.send }}" + receive: "{{ item.receive }}" + with_items: f5monitors +- name: BIGIP F5 | Remove HTTP Monitor + local_action: + module: bigip_monitor_http + state: absent + server: "{{ f5server }}" + user: "{{ f5user }}" + password: "{{ f5password }}" + name: "{{ monitorname }}" +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +TEMPLATE_TYPE = 'TTYPE_HTTP' +DEFAULT_PARENT_TYPE = 'http' + + +# =========================================== +# bigip_monitor module generic methods. +# these should be re-useable for other monitor types +# + +def bigip_api(bigip, user, password): + + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + + +def check_monitor_exists(module, api, monitor, parent): + + # hack to determine if monitor exists + result = False + try: + ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] + parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] + if ttype == TEMPLATE_TYPE and parent == parent2: + result = True + else: + module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + + +def create_monitor(api, monitor, template_attributes): + + try: + api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) + except bigsuds.OperationFailed, e: + if "already exists" in str(e): + return False + else: + # genuine exception + raise + return True + + +def delete_monitor(api, monitor): + + try: + api.LocalLB.Monitor.delete_template(template_names=[monitor]) + except bigsuds.OperationFailed, e: + # maybe it was deleted since we checked + if "was not found" in str(e): + return False + else: + # genuine exception + raise + return True + + +def check_string_property(api, monitor, str_property): + + try: + return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] + except bigsuds.OperationFailed, e: + # happens in check mode if not created yet + if "was not found" in str(e): + return True + else: + # genuine exception + raise + + +def set_string_property(api, monitor, str_property): + + api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) + + +def check_integer_property(api, monitor, int_property): + + try: + return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] + except bigsuds.OperationFailed, e: + # happens in check mode if not created yet + if "was not found" in str(e): + return True + else: + # genuine exception + raise + + + +def set_integer_property(api, monitor, int_property): + + 
api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) + + +def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): + + changed = False + for str_property in template_string_properties: + if str_property['value'] is not None and not check_string_property(api, monitor, str_property): + if not module.check_mode: + set_string_property(api, monitor, str_property) + changed = True + for int_property in template_integer_properties: + if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): + if not module.check_mode: + set_integer_property(api, monitor, int_property) + changed = True + + return changed + + +def get_ipport(api, monitor): + + return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] + + +def set_ipport(api, monitor, ipport): + + try: + api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) + return True, "" + + except bigsuds.OperationFailed, e: + if "Cannot modify the address type of monitor" in str(e): + return False, "Cannot modify the address type of monitor if already assigned to a pool." + else: + # genuine exception + raise + +# =========================================== +# main loop +# +# writing a module for other monitor types should +# only need an updated main() (and monitor specific functions) + +def main(): + + # begin monitor specific stuff + + module = AnsibleModule( + argument_spec = dict( + server = dict(required=True), + user = dict(required=True), + password = dict(required=True), + partition = dict(default='Common'), + state = dict(default='present', choices=['present', 'absent']), + name = dict(required=True), + parent = dict(default=DEFAULT_PARENT_TYPE), + parent_partition = dict(default='Common'), + send = dict(required=False), + receive = dict(required=False), + receive_disable = dict(required=False), + ip = dict(required=False), + port = dict(required=False, type='int'), + interval = dict(required=False, type='int'), + timeout = dict(required=False, type='int'), + time_until_up = dict(required=False, type='int', default=0) + ), + supports_check_mode=True + ) + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + partition = module.params['partition'] + parent_partition = module.params['parent_partition'] + state = module.params['state'] + name = module.params['name'] + parent = "/%s/%s" % (parent_partition, module.params['parent']) + monitor = "/%s/%s" % (partition, name) + send = module.params['send'] + receive = module.params['receive'] + receive_disable = module.params['receive_disable'] + ip = module.params['ip'] + port = module.params['port'] + interval = module.params['interval'] + timeout = module.params['timeout'] + time_until_up = module.params['time_until_up'] + + # end monitor specific stuff + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + api = bigip_api(server, user, password) + monitor_exists = check_monitor_exists(module, api, monitor, parent) + + + # ipport is a special setting + if monitor_exists: # make sure to not update current settings if not asked + cur_ipport = get_ipport(api, monitor) + if ip is None: + ip = cur_ipport['ipport']['address'] + if port is None: + port = cur_ipport['ipport']['port'] + else: # use API defaults if not defined to create it + if interval is None: + interval = 5 + if timeout is None: + timeout = 16 + if ip is None: + 
ip = '0.0.0.0' + if port is None: + port = 0 + if send is None: + send = '' + if receive is None: + receive = '' + if receive_disable is None: + receive_disable = '' + + # define and set address type + if ip == '0.0.0.0' and port == 0: + address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' + elif ip == '0.0.0.0' and port != 0: + address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' + elif ip != '0.0.0.0' and port != 0: + address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' + else: + address_type = 'ATYPE_UNSET' + + ipport = {'address_type': address_type, + 'ipport': {'address': ip, + 'port': port}} + + template_attributes = {'parent_template': parent, + 'interval': interval, + 'timeout': timeout, + 'dest_ipport': ipport, + 'is_read_only': False, + 'is_directly_usable': True} + + # monitor specific stuff + template_string_properties = [{'type': 'STYPE_SEND', + 'value': send}, + {'type': 'STYPE_RECEIVE', + 'value': receive}, + {'type': 'STYPE_RECEIVE_DRAIN', + 'value': receive_disable}] + + template_integer_properties = [{'type': 'ITYPE_INTERVAL', + 'value': interval}, + {'type': 'ITYPE_TIMEOUT', + 'value': timeout}, + {'type': 'ITYPE_TIME_UNTIL_UP', + 'value': time_until_up}] + + # main logic, monitor generic + + try: + result = {'changed': False} # default + + + if state == 'absent': + if monitor_exists: + if not module.check_mode: + # possible race condition if same task + # on other node deleted it first + result['changed'] |= delete_monitor(api, monitor) + else: + result['changed'] |= True + + else: # state present + ## check for monitor itself + if not monitor_exists: # create it + if not module.check_mode: + # again, check changed status here b/c race conditions + # if other task already created it + result['changed'] |= create_monitor(api, monitor, template_attributes) + else: + result['changed'] |= True + + ## check for monitor parameters + # whether it already existed, or was just created, now update + # the update functions need to check for check mode but + # cannot update settings if it doesn't exist which happens in check mode + result['changed'] |= update_monitor_properties(api, module, monitor, + template_string_properties, + template_integer_properties) + + # we just have to update the ipport if monitor already exists and it's different + if monitor_exists and cur_ipport != ipport: + set_ipport(api, monitor, ipport) + result['changed'] |= True + #else: monitor doesn't exist (check mode) or ipport is already ok + + + except Exception, e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_tcp b/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_tcp new file mode 100644 index 00000000000..8b89a0c6113 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/bigip_monitor_tcp @@ -0,0 +1,489 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: bigip_monitor_tcp +short_description: "Manages F5 BIG-IP LTM tcp monitors" +description: + - "Manages F5 BIG-IP LTM tcp monitors via iControl SOAP API" +version_added: "1.4" +author: Serge van Ginderachter +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Monitor API documentation: https://devcentral.f5.com/wiki/iControl.LocalLB__Monitor.ashx" +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + user: + description: + - BIG-IP username + required: true + default: null + password: + description: + - BIG-IP password + required: true + default: null + state: + description: + - Monitor state + required: false + default: 'present' + choices: ['present', 'absent'] + name: + description: + - Monitor name + required: true + default: null + aliases: ['monitor'] + partition: + description: + - Partition for the monitor + required: false + default: 'Common' + type: + description: + - The template type of this monitor template + required: false + default: 'tcp' + choices: [ 'TTYPE_TCP', 'TTYPE_TCP_ECHO', 'TTYPE_TCP_HALF_OPEN'] + parent: + description: + - The parent template of this monitor template + required: false + default: 'tcp' + choices: [ 'tcp', 'tcp_echo', 'tcp_half_open'] + parent_partition: + description: + - Partition for the parent monitor + required: false + default: 'Common' + send: + description: + - The send string for the monitor call + required: true + default: none + receive: + description: + - The receive string for the monitor call + required: true + default: none + ip: + description: + - IP address part of the ipport definition. The default API setting + is "0.0.0.0". + required: false + default: none + port: + description: + - port address part op the ipport definition. The default API + setting is 0. + required: false + default: none + interval: + description: + - The interval specifying how frequently the monitor instance + of this template will run. By default, this interval is used for up and + down states. The default API setting is 5. + required: false + default: none + timeout: + description: + - The number of seconds in which the node or service must respond to + the monitor request. If the target responds within the set time + period, it is considered up. If the target does not respond within + the set time period, it is considered down. You can change this + number to any number you want, however, it should be 3 times the + interval number of seconds plus 1 second. The default API setting + is 16. + required: false + default: none + time_until_up: + description: + - Specifies the amount of time in seconds after the first successful + response before a node will be marked up. A value of 0 will cause a + node to be marked up immediately after a valid response is received + from the node. The default API setting is 0. 
+ required: false + default: none +''' + +EXAMPLES = ''' + +- name: BIGIP F5 | Create TCP Monitor + local_action: + module: bigip_monitor_tcp + state: present + server: "{{ f5server }}" + user: "{{ f5user }}" + password: "{{ f5password }}" + name: "{{ item.monitorname }}" + type: tcp + send: "{{ item.send }}" + receive: "{{ item.receive }}" + with_items: f5monitors-tcp +- name: BIGIP F5 | Create TCP half open Monitor + local_action: + module: bigip_monitor_tcp + state: present + server: "{{ f5server }}" + user: "{{ f5user }}" + password: "{{ f5password }}" + name: "{{ item.monitorname }}" + type: tcp + send: "{{ item.send }}" + receive: "{{ item.receive }}" + with_items: f5monitors-halftcp +- name: BIGIP F5 | Remove TCP Monitor + local_action: + module: bigip_monitor_tcp + state: absent + server: "{{ f5server }}" + user: "{{ f5user }}" + password: "{{ f5password }}" + name: "{{ monitorname }}" + with_flattened: + - f5monitors-tcp + - f5monitors-halftcp + +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +TEMPLATE_TYPE = DEFAULT_TEMPLATE_TYPE = 'TTYPE_TCP' +TEMPLATE_TYPE_CHOICES = ['tcp', 'tcp_echo', 'tcp_half_open'] +DEFAULT_PARENT = DEFAULT_TEMPLATE_TYPE_CHOICE = DEFAULT_TEMPLATE_TYPE.replace('TTYPE_', '').lower() + + +# =========================================== +# bigip_monitor module generic methods. +# these should be re-useable for other monitor types +# + +def bigip_api(bigip, user, password): + + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + + +def check_monitor_exists(module, api, monitor, parent): + + # hack to determine if monitor exists + result = False + try: + ttype = api.LocalLB.Monitor.get_template_type(template_names=[monitor])[0] + parent2 = api.LocalLB.Monitor.get_parent_template(template_names=[monitor])[0] + if ttype == TEMPLATE_TYPE and parent == parent2: + result = True + else: + module.fail_json(msg='Monitor already exists, but has a different type (%s) or parent(%s)' % (ttype, parent)) + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + + +def create_monitor(api, monitor, template_attributes): + + try: + api.LocalLB.Monitor.create_template(templates=[{'template_name': monitor, 'template_type': TEMPLATE_TYPE}], template_attributes=[template_attributes]) + except bigsuds.OperationFailed, e: + if "already exists" in str(e): + return False + else: + # genuine exception + raise + return True + + +def delete_monitor(api, monitor): + + try: + api.LocalLB.Monitor.delete_template(template_names=[monitor]) + except bigsuds.OperationFailed, e: + # maybe it was deleted since we checked + if "was not found" in str(e): + return False + else: + # genuine exception + raise + return True + + +def check_string_property(api, monitor, str_property): + + try: + return str_property == api.LocalLB.Monitor.get_template_string_property([monitor], [str_property['type']])[0] + except bigsuds.OperationFailed, e: + # happens in check mode if not created yet + if "was not found" in str(e): + return True + else: + # genuine exception + raise + return True + + +def set_string_property(api, monitor, str_property): + + api.LocalLB.Monitor.set_template_string_property(template_names=[monitor], values=[str_property]) + + +def check_integer_property(api, monitor, int_property): + + try: + return int_property == api.LocalLB.Monitor.get_template_integer_property([monitor], [int_property['type']])[0] + except 
bigsuds.OperationFailed, e: + # happens in check mode if not created yet + if "was not found" in str(e): + return True + else: + # genuine exception + raise + return True + + +def set_integer_property(api, monitor, int_property): + + api.LocalLB.Monitor.set_template_int_property(template_names=[monitor], values=[int_property]) + + +def update_monitor_properties(api, module, monitor, template_string_properties, template_integer_properties): + + changed = False + for str_property in template_string_properties: + if str_property['value'] is not None and not check_string_property(api, monitor, str_property): + if not module.check_mode: + set_string_property(api, monitor, str_property) + changed = True + for int_property in template_integer_properties: + if int_property['value'] is not None and not check_integer_property(api, monitor, int_property): + if not module.check_mode: + set_integer_property(api, monitor, int_property) + changed = True + + return changed + + +def get_ipport(api, monitor): + + return api.LocalLB.Monitor.get_template_destination(template_names=[monitor])[0] + + +def set_ipport(api, monitor, ipport): + + try: + api.LocalLB.Monitor.set_template_destination(template_names=[monitor], destinations=[ipport]) + return True, "" + + except bigsuds.OperationFailed, e: + if "Cannot modify the address type of monitor" in str(e): + return False, "Cannot modify the address type of monitor if already assigned to a pool." + else: + # genuine exception + raise + +# =========================================== +# main loop +# +# writing a module for other monitor types should +# only need an updated main() (and monitor specific functions) + +def main(): + + # begin monitor specific stuff + + module = AnsibleModule( + argument_spec = dict( + server = dict(required=True), + user = dict(required=True), + password = dict(required=True), + partition = dict(default='Common'), + state = dict(default='present', choices=['present', 'absent']), + name = dict(required=True), + type = dict(default=DEFAULT_TEMPLATE_TYPE_CHOICE, choices=TEMPLATE_TYPE_CHOICES), + parent = dict(default=DEFAULT_PARENT), + parent_partition = dict(default='Common'), + send = dict(required=False), + receive = dict(required=False), + ip = dict(required=False), + port = dict(required=False, type='int'), + interval = dict(required=False, type='int'), + timeout = dict(required=False, type='int'), + time_until_up = dict(required=False, type='int', default=0) + ), + supports_check_mode=True + ) + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + partition = module.params['partition'] + parent_partition = module.params['parent_partition'] + state = module.params['state'] + name = module.params['name'] + type = 'TTYPE_' + module.params['type'].upper() + parent = "/%s/%s" % (parent_partition, module.params['parent']) + monitor = "/%s/%s" % (partition, name) + send = module.params['send'] + receive = module.params['receive'] + ip = module.params['ip'] + port = module.params['port'] + interval = module.params['interval'] + timeout = module.params['timeout'] + time_until_up = module.params['time_until_up'] + + # tcp monitor has multiple types, so overrule + global TEMPLATE_TYPE + TEMPLATE_TYPE = type + + # end monitor specific stuff + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + api = bigip_api(server, user, password) + monitor_exists = check_monitor_exists(module, api, monitor, parent) + + + # ipport is a special setting + if 
monitor_exists: # make sure to not update current settings if not asked + cur_ipport = get_ipport(api, monitor) + if ip is None: + ip = cur_ipport['ipport']['address'] + if port is None: + port = cur_ipport['ipport']['port'] + else: # use API defaults if not defined to create it + if interval is None: + interval = 5 + if timeout is None: + timeout = 16 + if ip is None: + ip = '0.0.0.0' + if port is None: + port = 0 + if send is None: + send = '' + if receive is None: + receive = '' + + # define and set address type + if ip == '0.0.0.0' and port == 0: + address_type = 'ATYPE_STAR_ADDRESS_STAR_PORT' + elif ip == '0.0.0.0' and port != 0: + address_type = 'ATYPE_STAR_ADDRESS_EXPLICIT_PORT' + elif ip != '0.0.0.0' and port != 0: + address_type = 'ATYPE_EXPLICIT_ADDRESS_EXPLICIT_PORT' + else: + address_type = 'ATYPE_UNSET' + + ipport = {'address_type': address_type, + 'ipport': {'address': ip, + 'port': port}} + + template_attributes = {'parent_template': parent, + 'interval': interval, + 'timeout': timeout, + 'dest_ipport': ipport, + 'is_read_only': False, + 'is_directly_usable': True} + + # monitor specific stuff + if type == 'TTYPE_TCP': + template_string_properties = [{'type': 'STYPE_SEND', + 'value': send}, + {'type': 'STYPE_RECEIVE', + 'value': receive}] + else: + template_string_properties = [] + + template_integer_properties = [{'type': 'ITYPE_INTERVAL', + 'value': interval}, + {'type': 'ITYPE_TIMEOUT', + 'value': timeout}, + {'type': 'ITYPE_TIME_UNTIL_UP', + 'value': interval}] + + # main logic, monitor generic + + try: + result = {'changed': False} # default + + + if state == 'absent': + if monitor_exists: + if not module.check_mode: + # possible race condition if same task + # on other node deleted it first + result['changed'] |= delete_monitor(api, monitor) + else: + result['changed'] |= True + + else: # state present + ## check for monitor itself + if not monitor_exists: # create it + if not module.check_mode: + # again, check changed status here b/c race conditions + # if other task already created it + result['changed'] |= create_monitor(api, monitor, template_attributes) + else: + result['changed'] |= True + + ## check for monitor parameters + # whether it already existed, or was just created, now update + # the update functions need to check for check mode but + # cannot update settings if it doesn't exist which happens in check mode + if monitor_exists and not module.check_mode: + result['changed'] |= update_monitor_properties(api, module, monitor, + template_string_properties, + template_integer_properties) + # else assume nothing changed + + # we just have to update the ipport if monitor already exists and it's different + if monitor_exists and cur_ipport != ipport: + set_ipport(api, monitor, ipport) + result['changed'] |= True + #else: monitor doesn't exist (check mode) or ipport is already ok + + + except Exception, e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_node b/lib/ansible/modules/extras/net_infrastructure/bigip_node new file mode 100644 index 00000000000..68b6a2b52f1 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/bigip_node @@ -0,0 +1,294 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public 
License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: bigip_node +short_description: "Manages F5 BIG-IP LTM nodes" +description: + - "Manages F5 BIG-IP LTM nodes via iControl SOAP API" +version_added: "1.4" +author: Matt Hite +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + choices: [] + aliases: [] + user: + description: + - BIG-IP username + required: true + default: null + choices: [] + aliases: [] + password: + description: + - BIG-IP password + required: true + default: null + choices: [] + aliases: [] + state: + description: + - Pool member state + required: true + default: present + choices: ['present', 'absent'] + aliases: [] + partition: + description: + - Partition + required: false + default: 'Common' + choices: [] + aliases: [] + name: + description: + - "Node name" + required: false + default: null + choices: [] + host: + description: + - "Node IP. Required when state=present and node does not exist. Error when state=absent." + required: true + default: null + choices: [] + aliases: ['address', 'ip'] + description: + description: + - "Node description." + required: false + default: null + choices: [] +''' + +EXAMPLES = ''' + +## playbook task examples: + +--- +# file bigip-test.yml +# ... +- hosts: bigip-test + tasks: + - name: Add node + local_action: > + bigip_node + server=lb.mydomain.com + user=admin + password=mysecret + state=present + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + name="{{ ansible_default_ipv4["address"] }}" + +# Note that the BIG-IP automatically names the node using the +# IP address specified in previous play's host parameter. +# Future plays referencing this node no longer use the host +# parameter but instead use the name parameter. +# Alternatively, you could have specified a name with the +# name parameter when state=present. 
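+# As a sketch of that alternative, the following task creates a node with an
+# explicit name (the IP 10.10.10.10 and the name "webserver01" are
+# illustrative placeholders, not values taken from a real environment):
+
+  - name: Add node with an explicit name
+    local_action: >
+      bigip_node
+      server=lb.mydomain.com
+      user=admin
+      password=mysecret
+      state=present
+      partition=matthite
+      host="10.10.10.10"
+      name="webserver01"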
+ + - name: Modify node description + local_action: > + bigip_node + server=lb.mydomain.com + user=admin + password=mysecret + state=present + partition=matthite + name="{{ ansible_default_ipv4["address"] }}" + description="Our best server yet" + + - name: Delete node + local_action: > + bigip_node + server=lb.mydomain.com + user=admin + password=mysecret + state=absent + partition=matthite + name="{{ ansible_default_ipv4["address"] }}" + +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +# ========================== +# bigip_node module specific +# + +def bigip_api(bigip, user, password): + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + +def node_exists(api, address): + # hack to determine if node exists + result = False + try: + api.LocalLB.NodeAddressV2.get_object_status(nodes=[address]) + result = True + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def create_node_address(api, address, name): + try: + api.LocalLB.NodeAddressV2.create(nodes=[name], addresses=[address], limits=[0]) + result = True + desc = "" + except bigsuds.OperationFailed, e: + if "already exists" in str(e): + result = False + desc = "referenced name or IP already in use" + else: + # genuine exception + raise + return (result, desc) + +def get_node_address(api, name): + return api.LocalLB.NodeAddressV2.get_address(nodes=[name])[0] + +def delete_node_address(api, address): + try: + api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) + result = True + desc = "" + except bigsuds.OperationFailed, e: + if "is referenced by a member of pool" in str(e): + result = False + desc = "node referenced by pool" + else: + # genuine exception + raise + return (result, desc) + +def set_node_description(api, name, description): + api.LocalLB.NodeAddressV2.set_description(nodes=[name], + descriptions=[description]) + +def get_node_description(api, name): + return api.LocalLB.NodeAddressV2.get_description(nodes=[name])[0] + +def main(): + module = AnsibleModule( + argument_spec = dict( + server = dict(type='str', required=True), + user = dict(type='str', required=True), + password = dict(type='str', required=True), + state = dict(type='str', default='present', choices=['present', 'absent']), + partition = dict(type='str', default='Common'), + name = dict(type='str', required=True), + host = dict(type='str', aliases=['address', 'ip']), + description = dict(type='str') + ), + supports_check_mode=True + ) + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + host = module.params['host'] + name = module.params['name'] + address = "/%s/%s" % (partition, name) + description = module.params['description'] + + if state == 'absent' and host is not None: + module.fail_json(msg="host parameter invalid when state=absent") + + try: + api = bigip_api(server, user, password) + result = {'changed': False} # default + + if state == 'absent': + if node_exists(api, address): + if not module.check_mode: + deleted, desc = delete_node_address(api, address) + if not deleted: + module.fail_json(msg="unable to delete: %s" % desc) + else: + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + + elif state == 
'present': + if not node_exists(api, address): + if host is None: + module.fail_json(msg="host parameter required when " \ + "state=present and node does not exist") + if not module.check_mode: + created, desc = create_node_address(api, address=host, name=address) + if not created: + module.fail_json(msg="unable to create: %s" % desc) + else: + result = {'changed': True} + if description is not None: + set_node_description(api, address, description) + result = {'changed': True} + else: + # check-mode return value + result = {'changed': True} + else: + # node exists -- potentially modify attributes + if host is not None: + if get_node_address(api, address) != host: + module.fail_json(msg="Changing the node address is " \ + "not supported by the API; " \ + "delete and recreate the node.") + if description is not None: + if get_node_description(api, address) != description: + if not module.check_mode: + set_node_description(api, address, description) + result = {'changed': True} + + except Exception, e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_pool b/lib/ansible/modules/extras/net_infrastructure/bigip_pool new file mode 100644 index 00000000000..48d03b9f1cb --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/bigip_pool @@ -0,0 +1,536 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: bigip_pool +short_description: "Manages F5 BIG-IP LTM pools" +description: + - "Manages F5 BIG-IP LTM pools via iControl SOAP API" +version_added: "1.2" +author: Matt Hite +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + choices: [] + aliases: [] + user: + description: + - BIG-IP username + required: true + default: null + choices: [] + aliases: [] + password: + description: + - BIG-IP password + required: true + default: null + choices: [] + aliases: [] + state: + description: + - Pool/pool member state + required: false + default: present + choices: ['present', 'absent'] + aliases: [] + name: + description: + - Pool name + required: true + default: null + choices: [] + aliases: ['pool'] + partition: + description: + - Partition of pool/pool member + required: false + default: 'Common' + choices: [] + aliases: [] + lb_method: + description: + - Load balancing method + version_added: "1.3" + required: False + default: 'round_robin' + choices: ['round_robin', 'ratio_member', 'least_connection_member', + 'observed_member', 'predictive_member', 'ratio_node_address', + 'least_connection_node_address', 'fastest_node_address', + 'observed_node_address', 'predictive_node_address', + 'dynamic_ratio', 'fastest_app_response', 'least_sessions', + 'dynamic_ratio_member', 'l3_addr', 'unknown', + 'weighted_least_connection_member', + 'weighted_least_connection_node_address', + 'ratio_session', 'ratio_least_connection_member', + 'ratio_least_connection_node_address'] + aliases: [] + monitor_type: + description: + - Monitor rule type when monitors > 1 + version_added: "1.3" + required: False + default: null + choices: ['and_list', 'm_of_n'] + aliases: [] + quorum: + description: + - Monitor quorum value when monitor_type is m_of_n + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + monitors: + description: + - Monitor template name list. Always use the full path to the monitor. + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + slow_ramp_time: + description: + - Sets the ramp-up time (in seconds) to gradually ramp up the load on newly added or freshly detected up pool members + version_added: "1.3" + required: False + default: null + choices: [] + aliases: [] + service_down_action: + description: + - Sets the action to take when node goes down in pool + version_added: "1.3" + required: False + default: null + choices: ['none', 'reset', 'drop', 'reselect'] + aliases: [] + host: + description: + - "Pool member IP" + required: False + default: null + choices: [] + aliases: ['address'] + port: + description: + - "Pool member port" + required: False + default: null + choices: [] + aliases: [] +''' + +EXAMPLES = ''' + +## playbook task examples: + +--- +# file bigip-test.yml +# ... 
+- hosts: localhost + tasks: + - name: Create pool + local_action: > + bigip_pool + server=lb.mydomain.com + user=admin + password=mysecret + state=present + name=matthite-pool + partition=matthite + lb_method=least_connection_member + slow_ramp_time=120 + + - name: Modify load balancer method + local_action: > + bigip_pool + server=lb.mydomain.com + user=admin + password=mysecret + state=present + name=matthite-pool + partition=matthite + lb_method=round_robin + +- hosts: bigip-test + tasks: + - name: Add pool member + local_action: > + bigip_pool + server=lb.mydomain.com + user=admin + password=mysecret + state=present + name=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + + - name: Remove pool member from pool + local_action: > + bigip_pool + server=lb.mydomain.com + user=admin + password=mysecret + state=absent + name=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + +- hosts: localhost + tasks: + - name: Delete pool + local_action: > + bigip_pool + server=lb.mydomain.com + user=admin + password=mysecret + state=absent + name=matthite-pool + partition=matthite + +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +# =========================================== +# bigip_pool module specific support methods. +# + +def bigip_api(bigip, user, password): + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + +def pool_exists(api, pool): + # hack to determine if pool exists + result = False + try: + api.LocalLB.Pool.get_object_status(pool_names=[pool]) + result = True + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def create_pool(api, pool, lb_method): + # create requires lb_method but we don't want to default + # to a value on subsequent runs + if not lb_method: + lb_method = 'round_robin' + lb_method = "LB_METHOD_%s" % lb_method.strip().upper() + api.LocalLB.Pool.create_v2(pool_names=[pool], lb_methods=[lb_method], + members=[[]]) + +def remove_pool(api, pool): + api.LocalLB.Pool.delete_pool(pool_names=[pool]) + +def get_lb_method(api, pool): + lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0] + lb_method = lb_method.strip().replace('LB_METHOD_', '').lower() + return lb_method + +def set_lb_method(api, pool, lb_method): + lb_method = "LB_METHOD_%s" % lb_method.strip().upper() + api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method]) + +def get_monitors(api, pool): + result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule'] + monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower() + quorum = result['quorum'] + monitor_templates = result['monitor_templates'] + return (monitor_type, quorum, monitor_templates) + +def set_monitors(api, pool, monitor_type, quorum, monitor_templates): + monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper() + monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates} + monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule} + api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association]) + +def get_slow_ramp_time(api, pool): + result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0] + return result + +def set_slow_ramp_time(api, pool, seconds): + api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds]) + 
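+# Note on the get_*/set_* helpers above and below: the iControl API returns
+# and expects prefixed enum strings, while this module compares against the
+# short lower-case form used in playbook parameters. For example
+# (illustrative values only):
+#   get_lb_method():              "LB_METHOD_ROUND_ROBIN"        -> "round_robin"
+#   get_action_on_service_down(): "SERVICE_DOWN_ACTION_RESELECT" -> "reselect"
+# The matching set_* helpers re-add the prefix before calling the API.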
+def get_action_on_service_down(api, pool): + result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0] + result = result.split("SERVICE_DOWN_ACTION_")[-1].lower() + return result + +def set_action_on_service_down(api, pool, action): + action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper() + api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action]) + +def member_exists(api, pool, address, port): + # hack to determine if member exists + result = False + try: + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.get_member_object_status(pool_names=[pool], + members=[members]) + result = True + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def delete_node_address(api, address): + result = False + try: + api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) + result = True + except bigsuds.OperationFailed, e: + if "is referenced by a member of pool" in str(e): + result = False + else: + # genuine exception + raise + return result + +def remove_pool_member(api, pool, address, port): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) + +def add_pool_member(api, pool, address, port): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) + +def main(): + lb_method_choices = ['round_robin', 'ratio_member', + 'least_connection_member', 'observed_member', + 'predictive_member', 'ratio_node_address', + 'least_connection_node_address', + 'fastest_node_address', 'observed_node_address', + 'predictive_node_address', 'dynamic_ratio', + 'fastest_app_response', 'least_sessions', + 'dynamic_ratio_member', 'l3_addr', 'unknown', + 'weighted_least_connection_member', + 'weighted_least_connection_node_address', + 'ratio_session', 'ratio_least_connection_member', + 'ratio_least_connection_node_address'] + + monitor_type_choices = ['and_list', 'm_of_n'] + + service_down_choices = ['none', 'reset', 'drop', 'reselect'] + + module = AnsibleModule( + argument_spec = dict( + server = dict(type='str', required=True), + user = dict(type='str', required=True), + password = dict(type='str', required=True), + state = dict(type='str', default='present', choices=['present', 'absent']), + name = dict(type='str', required=True, aliases=['pool']), + partition = dict(type='str', default='Common'), + lb_method = dict(type='str', choices=lb_method_choices), + monitor_type = dict(type='str', choices=monitor_type_choices), + quorum = dict(type='int'), + monitors = dict(type='list'), + slow_ramp_time = dict(type='int'), + service_down_action = dict(type='str', choices=service_down_choices), + host = dict(type='str', aliases=['address']), + port = dict(type='int') + ), + supports_check_mode=True + ) + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + name = module.params['name'] + partition = module.params['partition'] + pool = "/%s/%s" % (partition, name) + lb_method = module.params['lb_method'] + if lb_method: + lb_method = lb_method.lower() + monitor_type = module.params['monitor_type'] + if monitor_type: + monitor_type = monitor_type.lower() + quorum = module.params['quorum'] + monitors = module.params['monitors'] + if monitors: + monitors = [] + for monitor in 
module.params['monitors']: + if "/" not in monitor: + monitors.append("/%s/%s" % (partition, monitor)) + else: + monitors.append(monitor) + slow_ramp_time = module.params['slow_ramp_time'] + service_down_action = module.params['service_down_action'] + if service_down_action: + service_down_action = service_down_action.lower() + host = module.params['host'] + address = "/%s/%s" % (partition, host) + port = module.params['port'] + + # sanity check user supplied values + + if (host and not port) or (port and not host): + module.fail_json(msg="both host and port must be supplied") + + if 1 > port > 65535: + module.fail_json(msg="valid ports must be in range 1 - 65535") + + if monitors: + if len(monitors) == 1: + # set default required values for single monitor + quorum = 0 + monitor_type = 'single' + elif len(monitors) > 1: + if not monitor_type: + module.fail_json(msg="monitor_type required for monitors > 1") + if monitor_type == 'm_of_n' and not quorum: + module.fail_json(msg="quorum value required for monitor_type m_of_n") + if monitor_type != 'm_of_n': + quorum = 0 + elif monitor_type: + # no monitors specified but monitor_type exists + module.fail_json(msg="monitor_type require monitors parameter") + elif quorum is not None: + # no monitors specified but quorum exists + module.fail_json(msg="quorum requires monitors parameter") + + try: + api = bigip_api(server, user, password) + result = {'changed': False} # default + + if state == 'absent': + if host and port and pool: + # member removal takes precedent + if pool_exists(api, pool) and member_exists(api, pool, address, port): + if not module.check_mode: + remove_pool_member(api, pool, address, port) + deleted = delete_node_address(api, address) + result = {'changed': True, 'deleted': deleted} + else: + result = {'changed': True} + elif pool_exists(api, pool): + # no host/port supplied, must be pool removal + if not module.check_mode: + # hack to handle concurrent runs of module + # pool might be gone before we actually remove it + try: + remove_pool(api, pool) + result = {'changed': True} + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = {'changed': False} + else: + # genuine exception + raise + else: + # check-mode return value + result = {'changed': True} + + elif state == 'present': + update = False + if not pool_exists(api, pool): + # pool does not exist -- need to create it + if not module.check_mode: + # a bit of a hack to handle concurrent runs of this module. + # even though we've checked the pool doesn't exist, + # it may exist by the time we run create_pool(). + # this catches the exception and does something smart + # about it! 
+ try: + create_pool(api, pool, lb_method) + result = {'changed': True} + except bigsuds.OperationFailed, e: + if "already exists" in str(e): + update = True + else: + # genuine exception + raise + else: + if monitors: + set_monitors(api, pool, monitor_type, quorum, monitors) + if slow_ramp_time: + set_slow_ramp_time(api, pool, slow_ramp_time) + if service_down_action: + set_action_on_service_down(api, pool, service_down_action) + if host and port: + add_pool_member(api, pool, address, port) + else: + # check-mode return value + result = {'changed': True} + else: + # pool exists -- potentially modify attributes + update = True + + if update: + if lb_method and lb_method != get_lb_method(api, pool): + if not module.check_mode: + set_lb_method(api, pool, lb_method) + result = {'changed': True} + if monitors: + t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool) + if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)): + if not module.check_mode: + set_monitors(api, pool, monitor_type, quorum, monitors) + result = {'changed': True} + if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool): + if not module.check_mode: + set_slow_ramp_time(api, pool, slow_ramp_time) + result = {'changed': True} + if service_down_action and service_down_action != get_action_on_service_down(api, pool): + if not module.check_mode: + set_action_on_service_down(api, pool, service_down_action) + result = {'changed': True} + if (host and port) and not member_exists(api, pool, address, port): + if not module.check_mode: + add_pool_member(api, pool, address, port) + result = {'changed': True} + + except Exception, e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/bigip_pool_member b/lib/ansible/modules/extras/net_infrastructure/bigip_pool_member new file mode 100644 index 00000000000..5aef9f0ae98 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/bigip_pool_member @@ -0,0 +1,378 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Matt Hite +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
+ +DOCUMENTATION = ''' +--- +module: bigip_pool_member +short_description: "Manages F5 BIG-IP LTM pool members" +description: + - "Manages F5 BIG-IP LTM pool members via iControl SOAP API" +version_added: "1.4" +author: Matt Hite +notes: + - "Requires BIG-IP software version >= 11" + - "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)" + - "Best run as a local_action in your playbook" + - "Supersedes bigip_pool for managing pool members" + +requirements: + - bigsuds +options: + server: + description: + - BIG-IP host + required: true + default: null + choices: [] + aliases: [] + user: + description: + - BIG-IP username + required: true + default: null + choices: [] + aliases: [] + password: + description: + - BIG-IP password + required: true + default: null + choices: [] + aliases: [] + state: + description: + - Pool member state + required: true + default: present + choices: ['present', 'absent'] + aliases: [] + pool: + description: + - Pool name. This pool must exist. + required: true + default: null + choices: [] + aliases: [] + partition: + description: + - Partition + required: false + default: 'Common' + choices: [] + aliases: [] + host: + description: + - Pool member IP + required: true + default: null + choices: [] + aliases: ['address', 'name'] + port: + description: + - Pool member port + required: true + default: null + choices: [] + aliases: [] + connection_limit: + description: + - Pool member connection limit. Setting this to 0 disables the limit. + required: false + default: null + choices: [] + aliases: [] + description: + description: + - Pool member description + required: false + default: null + choices: [] + aliases: [] + rate_limit: + description: + - Pool member rate limit (connections-per-second). Setting this to 0 disables the limit. + required: false + default: null + choices: [] + aliases: [] + ratio: + description: + - Pool member ratio weight. Valid values range from 1 through 100. New pool members -- unless overriden with this value -- default to 1. + required: false + default: null + choices: [] + aliases: [] +''' + +EXAMPLES = ''' + +## playbook task examples: + +--- +# file bigip-test.yml +# ... +- hosts: bigip-test + tasks: + - name: Add pool member + local_action: > + bigip_pool_member + server=lb.mydomain.com + user=admin + password=mysecret + state=present + pool=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + description="web server" + connection_limit=100 + rate_limit=50 + ratio=2 + + - name: Modify pool member ratio and description + local_action: > + bigip_pool_member + server=lb.mydomain.com + user=admin + password=mysecret + state=present + pool=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + ratio=1 + description="nginx server" + + - name: Remove pool member from pool + local_action: > + bigip_pool_member + server=lb.mydomain.com + user=admin + password=mysecret + state=absent + pool=matthite-pool + partition=matthite + host="{{ ansible_default_ipv4["address"] }}" + port=80 + +''' + +try: + import bigsuds +except ImportError: + bigsuds_found = False +else: + bigsuds_found = True + +# =========================================== +# bigip_pool_member module specific support methods. 
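The try/except ImportError block above degrades gracefully when the optional bigsuds client is not installed, deferring the failure to runtime where it can be reported through fail_json. A generic sketch of that guard, assuming only the standard library; the flag and helper names are illustrative.

HAS_BIGSUDS = True
try:
    import bigsuds  # noqa: F401
except ImportError:
    HAS_BIGSUDS = False

def require_bigsuds(fail):
    """Call the supplied fail callback (e.g. module.fail_json) when the
    optional client library is missing, instead of raising at import time."""
    if not HAS_BIGSUDS:
        fail(msg="the python bigsuds module is required")

# e.g. require_bigsuds(module.fail_json) near the top of main()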
+# + +def bigip_api(bigip, user, password): + api = bigsuds.BIGIP(hostname=bigip, username=user, password=password) + return api + +def pool_exists(api, pool): + # hack to determine if pool exists + result = False + try: + api.LocalLB.Pool.get_object_status(pool_names=[pool]) + result = True + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def member_exists(api, pool, address, port): + # hack to determine if member exists + result = False + try: + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.get_member_object_status(pool_names=[pool], + members=[members]) + result = True + except bigsuds.OperationFailed, e: + if "was not found" in str(e): + result = False + else: + # genuine exception + raise + return result + +def delete_node_address(api, address): + result = False + try: + api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address]) + result = True + except bigsuds.OperationFailed, e: + if "is referenced by a member of pool" in str(e): + result = False + else: + # genuine exception + raise + return result + +def remove_pool_member(api, pool, address, port): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members]) + +def add_pool_member(api, pool, address, port): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members]) + +def get_connection_limit(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_connection_limit(pool_names=[pool], members=[members])[0][0] + return result + +def set_connection_limit(api, pool, address, port, limit): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.set_member_connection_limit(pool_names=[pool], members=[members], limits=[[limit]]) + +def get_description(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_description(pool_names=[pool], members=[members])[0][0] + return result + +def set_description(api, pool, address, port, description): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.set_member_description(pool_names=[pool], members=[members], descriptions=[[description]]) + +def get_rate_limit(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_rate_limit(pool_names=[pool], members=[members])[0][0] + return result + +def set_rate_limit(api, pool, address, port, limit): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.set_member_rate_limit(pool_names=[pool], members=[members], limits=[[limit]]) + +def get_ratio(api, pool, address, port): + members = [{'address': address, 'port': port}] + result = api.LocalLB.Pool.get_member_ratio(pool_names=[pool], members=[members])[0][0] + return result + +def set_ratio(api, pool, address, port, ratio): + members = [{'address': address, 'port': port}] + api.LocalLB.Pool.set_member_ratio(pool_names=[pool], members=[members], ratios=[[ratio]]) + +def main(): + module = AnsibleModule( + argument_spec = dict( + server = dict(type='str', required=True), + user = dict(type='str', required=True), + password = dict(type='str', required=True), + state = dict(type='str', default='present', choices=['present', 'absent']), + pool = dict(type='str', required=True), + partition = dict(type='str', default='Common'), + host = dict(type='str', 
required=True, aliases=['address', 'name']), + port = dict(type='int', required=True), + connection_limit = dict(type='int'), + description = dict(type='str'), + rate_limit = dict(type='int'), + ratio = dict(type='int') + ), + supports_check_mode=True + ) + + if not bigsuds_found: + module.fail_json(msg="the python bigsuds module is required") + + server = module.params['server'] + user = module.params['user'] + password = module.params['password'] + state = module.params['state'] + partition = module.params['partition'] + pool = "/%s/%s" % (partition, module.params['pool']) + connection_limit = module.params['connection_limit'] + description = module.params['description'] + rate_limit = module.params['rate_limit'] + ratio = module.params['ratio'] + host = module.params['host'] + address = "/%s/%s" % (partition, host) + port = module.params['port'] + + # sanity check user supplied values + + if (host and not port) or (port and not host): + module.fail_json(msg="both host and port must be supplied") + + if 1 > port > 65535: + module.fail_json(msg="valid ports must be in range 1 - 65535") + + try: + api = bigip_api(server, user, password) + if not pool_exists(api, pool): + module.fail_json(msg="pool %s does not exist" % pool) + result = {'changed': False} # default + + if state == 'absent': + if member_exists(api, pool, address, port): + if not module.check_mode: + remove_pool_member(api, pool, address, port) + deleted = delete_node_address(api, address) + result = {'changed': True, 'deleted': deleted} + else: + result = {'changed': True} + + elif state == 'present': + if not member_exists(api, pool, address, port): + if not module.check_mode: + add_pool_member(api, pool, address, port) + if connection_limit is not None: + set_connection_limit(api, pool, address, port, connection_limit) + if description is not None: + set_description(api, pool, address, port, description) + if rate_limit is not None: + set_rate_limit(api, pool, address, port, rate_limit) + if ratio is not None: + set_ratio(api, pool, address, port, ratio) + result = {'changed': True} + else: + # pool member exists -- potentially modify attributes + if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port): + if not module.check_mode: + set_connection_limit(api, pool, address, port, connection_limit) + result = {'changed': True} + if description is not None and description != get_description(api, pool, address, port): + if not module.check_mode: + set_description(api, pool, address, port, description) + result = {'changed': True} + if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port): + if not module.check_mode: + set_rate_limit(api, pool, address, port, rate_limit) + result = {'changed': True} + if ratio is not None and ratio != get_ratio(api, pool, address, port): + if not module.check_mode: + set_ratio(api, pool, address, port, ratio) + result = {'changed': True} + + except Exception, e: + module.fail_json(msg="received exception: %s" % e) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/dnsimple b/lib/ansible/modules/extras/net_infrastructure/dnsimple new file mode 100755 index 00000000000..19b167dee19 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/dnsimple @@ -0,0 +1,302 @@ +#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the 
terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: dnsimple +version_added: "1.6" +short_description: Interface with dnsimple.com (a DNS hosting service). +description: + - "Manages domains and records via the DNSimple API, see the docs: U(http://developer.dnsimple.com/)" +options: + account_email: + description: + - "Account email. If omitted, the env variables DNSIMPLE_EMAIL and DNSIMPLE_API_TOKEN will be looked for. If those aren't found, a C(.dnsimple) file will be looked for, see: U(https://github.com/mikemaccana/dnsimple-python#getting-started)" + required: false + default: null + + account_api_token: + description: + - Account API token. See I(account_email) for info. + required: false + default: null + + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNSimple. If omitted, a list of domains will be returned. + - If domain is present but the domain doesn't exist, it will be created. + required: false + default: null + + record: + description: + - Record to add, if blank a record for the domain will be created, supports the wildcard (*) + required: false + default: null + + record_ids: + description: + - List of records to ensure they either exist or don't exist + required: false + default: null + + type: + description: + - The type of DNS record to create + required: false + choices: [ 'A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL' ] + default: null + + ttl: + description: + - The TTL to give the new record + required: false + default: 3600 (one hour) + + value: + description: + - Record value + - "Must be specified when trying to ensure a record exists" + required: false + default: null + + priority: + description: + - Record priority + required: false + default: null + + state: + description: + - whether the record should exist or not + required: false + choices: [ 'present', 'absent' ] + default: null + + solo: + description: + - Whether the record should be the only one for that record type and record name. 
Only use with state=present on a record + required: false + default: null + +requirements: [ dnsimple ] +author: Alex Coomans +''' + +EXAMPLES = ''' +# authenicate using email and API token +- local_action: dnsimple account_email=test@example.com account_api_token=dummyapitoken + +# fetch all domains +- local_action dnsimple + register: domains + +# fetch my.com domain records +- local_action: dnsimple domain=my.com state=present + register: records + +# delete a domain +- local_action: dnsimple domain=my.com state=absent + +# create a test.my.com A record to point to 127.0.0.01 +- local_action: dnsimple domain=my.com record=test type=A value=127.0.0.1 + register: record + +# and then delete it +- local_action: dnsimple domain=my.com record_ids={{ record['id'] }} + +# create a my.com CNAME record to example.com +- local_action: dnsimple domain=my.com record= type=CNAME value=example.com state=present + +# change it's ttl +- local_action: dnsimple domain=my.com record= type=CNAME value=example.com ttl=600 state=present + +# and delete the record +- local_action: dnsimpledomain=my.com record= type=CNAME value=example.com state=absent + +''' + +import os +try: + from dnsimple import DNSimple + from dnsimple.dnsimple import DNSimpleException +except ImportError: + print "failed=True msg='dnsimple required for this module'" + sys.exit(1) + +def main(): + module = AnsibleModule( + argument_spec = dict( + account_email = dict(required=False), + account_api_token = dict(required=False, no_log=True), + domain = dict(required=False), + record = dict(required=False), + record_ids = dict(required=False, type='list'), + type = dict(required=False, choices=['A', 'ALIAS', 'CNAME', 'MX', 'SPF', 'URL', 'TXT', 'NS', 'SRV', 'NAPTR', 'PTR', 'AAAA', 'SSHFP', 'HINFO', 'POOL']), + ttl = dict(required=False, default=3600, type='int'), + value = dict(required=False), + priority = dict(required=False, type='int'), + state = dict(required=False, choices=['present', 'absent']), + solo = dict(required=False, type='bool'), + ), + required_together = ( + ['record', 'value'] + ), + supports_check_mode = True, + ) + + account_email = module.params.get('account_email') + account_api_token = module.params.get('account_api_token') + domain = module.params.get('domain') + record = module.params.get('record') + record_ids = module.params.get('record_ids') + record_type = module.params.get('type') + ttl = module.params.get('ttl') + value = module.params.get('value') + priority = module.params.get('priority') + state = module.params.get('state') + is_solo = module.params.get('solo') + + if account_email and account_api_token: + client = DNSimple(email=account_email, api_token=account_api_token) + elif os.environ.get('DNSIMPLE_EMAIL') and os.environ.get('DNSIMPLE_API_TOKEN'): + client = DNSimple(email=os.environ.get('DNSIMPLE_EMAIL'), api_token=os.environ.get('DNSIMPLE_API_TOKEN')) + else: + client = DNSimple() + + try: + # Let's figure out what operation we want to do + + # No domain, return a list + if not domain: + domains = client.domains() + module.exit_json(changed=False, result=[d['domain'] for d in domains]) + + # Domain & No record + if domain and record is None and not record_ids: + domains = [d['domain'] for d in client.domains()] + if domain.isdigit(): + dr = next((d for d in domains if d['id'] == int(domain)), None) + else: + dr = next((d for d in domains if d['name'] == domain), None) + if state == 'present': + if dr: + module.exit_json(changed=False, result=dr) + else: + if module.check_mode: + 
module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_domain(domain)['domain']) + elif state == 'absent': + if dr: + if not module.check_mode: + client.delete(domain) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + # need the not none check since record could be an empty string + if domain and record is not None: + records = [r['record'] for r in client.records(str(domain))] + + if not record_type: + module.fail_json(msg="Missing the record type") + + if not value: + module.fail_json(msg="Missing the record value") + + rr = next((r for r in records if r['name'] == record and r['record_type'] == record_type and r['content'] == value), None) + + if state == 'present': + changed = False + if is_solo: + # delete any records that have the same name and record type + same_type = [r['id'] for r in records if r['name'] == record and r['record_type'] == record_type] + if rr: + same_type = [rid for rid in same_type if rid != rr['id']] + if same_type: + if not module.check_mode: + for rid in same_type: + client.delete_record(str(domain), rid) + changed = True + if rr: + # check if we need to update + if rr['ttl'] != ttl or rr['prio'] != priority: + data = {} + if ttl: data['ttl'] = ttl + if priority: data['prio'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.update_record(str(domain), str(rr['id']), data)['record']) + else: + module.exit_json(changed=changed, result=rr) + else: + # create it + data = { + 'name': record, + 'record_type': record_type, + 'content': value, + } + if ttl: data['ttl'] = ttl + if priority: data['prio'] = priority + if module.check_mode: + module.exit_json(changed=True) + else: + module.exit_json(changed=True, result=client.add_record(str(domain), data)['record']) + elif state == 'absent': + if rr: + if not module.check_mode: + client.delete_record(str(domain), rr['id']) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + # Make sure these record_ids either all exist or none + if domain and record_ids: + current_records = [str(r['record']['id']) for r in client.records(str(domain))] + wanted_records = [str(r) for r in record_ids] + if state == 'present': + difference = list(set(wanted_records) - set(current_records)) + if difference: + module.fail_json(msg="Missing the following records: %s" % difference) + else: + module.exit_json(changed=False) + elif state == 'absent': + difference = list(set(wanted_records) & set(current_records)) + if difference: + if not module.check_mode: + for rid in difference: + client.delete_record(str(domain), rid) + module.exit_json(changed=True) + else: + module.exit_json(changed=False) + else: + module.fail_json(msg="'%s' is an unknown value for the state argument" % state) + + except DNSimpleException, e: + module.fail_json(msg="Unable to contact DNSimple: %s" % e.message) + + module.fail_json(msg="Unknown what you wanted me to do") + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/dnsmadeeasy b/lib/ansible/modules/extras/net_infrastructure/dnsmadeeasy new file mode 100644 index 00000000000..148e25a5011 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/dnsmadeeasy @@ -0,0 +1,329 @@ 
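The dnsimple module above decides between add_record and update_record by first locating an existing record with the same name, type and content. A condensed sketch of that decision, reusing the dnsimple-python calls the module itself makes (client.records / add_record / update_record); the function name and return convention are illustrative only.

def upsert_record(client, domain, name, record_type, content, ttl=None, priority=None):
    """Ensure one (name, type, content) record exists; return (changed, record)."""
    records = [r['record'] for r in client.records(str(domain))]
    match = next((r for r in records
                  if r['name'] == name
                  and r['record_type'] == record_type
                  and r['content'] == content), None)

    if match is None:
        data = {'name': name, 'record_type': record_type, 'content': content}
        if ttl:
            data['ttl'] = ttl
        if priority:
            data['prio'] = priority
        return True, client.add_record(str(domain), data)['record']

    if (ttl and match['ttl'] != ttl) or (priority and match['prio'] != priority):
        data = {}
        if ttl:
            data['ttl'] = ttl
        if priority:
            data['prio'] = priority
        return True, client.update_record(str(domain), str(match['id']), data)['record']

    return False, match

# usage (requires the dnsimple-python package):
#   from dnsimple import DNSimple
#   client = DNSimple(email='user@example.com', api_token='token')
#   changed, record = upsert_record(client, 'my.com', 'test', 'A', '127.0.0.1', ttl=600)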
+#!/usr/bin/python +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: dnsmadeeasy +version_added: "1.3" +short_description: Interface with dnsmadeeasy.com (a DNS hosting service). +description: + - "Manages DNS records via the v2 REST API of the DNS Made Easy service. It handles records only; there is no manipulation of domains or monitor/account support yet. See: U(http://www.dnsmadeeasy.com/services/rest-api/)" +options: + account_key: + description: + - Accout API Key. + required: true + default: null + + account_secret: + description: + - Accout Secret Key. + required: true + default: null + + domain: + description: + - Domain to work with. Can be the domain name (e.g. "mydomain.com") or the numeric ID of the domain in DNS Made Easy (e.g. "839989") for faster resolution. + required: true + default: null + + record_name: + description: + - Record name to get/create/delete/update. If record_name is not specified; all records for the domain will be returned in "result" regardless of the state argument. + required: false + default: null + + record_type: + description: + - Record type. + required: false + choices: [ 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT' ] + default: null + + record_value: + description: + - "Record value. HTTPRED: , MX: , NS: , PTR: , SRV: , TXT: " + - "If record_value is not specified; no changes will be made and the record will be returned in 'result' (in other words, this module can be used to fetch a record's current id, type, and ttl)" + required: false + default: null + + record_ttl: + description: + - record's "Time to live". Number of seconds the record remains cached in DNS servers. + required: false + default: 1800 + + state: + description: + - whether the record should exist or not + required: true + choices: [ 'present', 'absent' ] + default: null + + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + +notes: + - The DNS Made Easy service requires that machines interacting with the API have the proper time and timezone set. Be sure you are within a few seconds of actual time by using NTP. + - This module returns record(s) in the "result" element when 'state' is set to 'present'. This value can be be registered and used in your playbooks. 
+ +requirements: [ urllib, urllib2, hashlib, hmac ] +author: Brice Burgess +''' + +EXAMPLES = ''' +# fetch my.com domain records +- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present + register: response + +# create / ensure the presence of a record +- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_type="A" record_value="127.0.0.1" + +# update the previously created record +- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" record_value="192.168.0.1" + +# fetch a specific record +- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=present record_name="test" + register: response + +# delete a record / ensure it is absent +- dnsmadeeasy: account_key=key account_secret=secret domain=my.com state=absent record_name="test" +''' + +# ============================================ +# DNSMadeEasy module specific support methods. +# + +IMPORT_ERROR = None +try: + import json + from time import strftime, gmtime + import hashlib + import hmac +except ImportError, e: + IMPORT_ERROR = str(e) + +class DME2: + + def __init__(self, apikey, secret, domain, module): + self.module = module + + self.api = apikey + self.secret = secret + self.baseurl = 'https://api.dnsmadeeasy.com/V2.0/' + self.domain = str(domain) + self.domain_map = None # ["domain_name"] => ID + self.record_map = None # ["record_name"] => ID + self.records = None # ["record_ID"] => + + # Lookup the domain ID if passed as a domain name vs. ID + if not self.domain.isdigit(): + self.domain = self.getDomainByName(self.domain)['id'] + + self.record_url = 'dns/managed/' + str(self.domain) + '/records' + + def _headers(self): + currTime = self._get_date() + hashstring = self._create_hash(currTime) + headers = {'x-dnsme-apiKey': self.api, + 'x-dnsme-hmac': hashstring, + 'x-dnsme-requestDate': currTime, + 'content-type': 'application/json'} + return headers + + def _get_date(self): + return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime()) + + def _create_hash(self, rightnow): + return hmac.new(self.secret.encode(), rightnow.encode(), hashlib.sha1).hexdigest() + + def query(self, resource, method, data=None): + url = self.baseurl + resource + if data and not isinstance(data, basestring): + data = urllib.urlencode(data) + + response, info = fetch_url(self.module, url, data=data, method=method, headers=self._headers()) + if info['status'] not in (200, 201, 204): + self.module.fail_json(msg="%s returned %s, with body: %s" % (url, info['status'], info['msg'])) + + try: + return json.load(response) + except Exception, e: + return {} + + def getDomain(self, domain_id): + if not self.domain_map: + self._instMap('domain') + + return self.domains.get(domain_id, False) + + def getDomainByName(self, domain_name): + if not self.domain_map: + self._instMap('domain') + + return self.getDomain(self.domain_map.get(domain_name, 0)) + + def getDomains(self): + return self.query('dns/managed', 'GET')['data'] + + def getRecord(self, record_id): + if not self.record_map: + self._instMap('record') + + return self.records.get(record_id, False) + + def getRecordByName(self, record_name): + if not self.record_map: + self._instMap('record') + + return self.getRecord(self.record_map.get(record_name, 0)) + + def getRecords(self): + return self.query(self.record_url, 'GET')['data'] + + def _instMap(self, type): + #@TODO cache this call so it's executed only once per ansible execution + map = {} + results = {} + + # iterate over 
e.g. self.getDomains() || self.getRecords() + for result in getattr(self, 'get' + type.title() + 's')(): + + map[result['name']] = result['id'] + results[result['id']] = result + + # e.g. self.domain_map || self.record_map + setattr(self, type + '_map', map) + setattr(self, type + 's', results) # e.g. self.domains || self.records + + def prepareRecord(self, data): + return json.dumps(data, separators=(',', ':')) + + def createRecord(self, data): + #@TODO update the cache w/ resultant record + id when impleneted + return self.query(self.record_url, 'POST', data) + + def updateRecord(self, record_id, data): + #@TODO update the cache w/ resultant record + id when impleneted + return self.query(self.record_url + '/' + str(record_id), 'PUT', data) + + def deleteRecord(self, record_id): + #@TODO remove record from the cache when impleneted + return self.query(self.record_url + '/' + str(record_id), 'DELETE') + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_key=dict(required=True), + account_secret=dict(required=True, no_log=True), + domain=dict(required=True), + state=dict(required=True, choices=['present', 'absent']), + record_name=dict(required=False), + record_type=dict(required=False, choices=[ + 'A', 'AAAA', 'CNAME', 'HTTPRED', 'MX', 'NS', 'PTR', 'SRV', 'TXT']), + record_value=dict(required=False), + record_ttl=dict(required=False, default=1800, type='int'), + validate_certs = dict(default='yes', type='bool'), + ), + required_together=( + ['record_value', 'record_ttl', 'record_type'] + ) + ) + + if IMPORT_ERROR: + module.fail_json(msg="Import Error: " + IMPORT_ERROR) + + DME = DME2(module.params["account_key"], module.params[ + "account_secret"], module.params["domain"], module) + state = module.params["state"] + record_name = module.params["record_name"] + + # Follow Keyword Controlled Behavior + if not record_name: + domain_records = DME.getRecords() + if not domain_records: + module.fail_json( + msg="The requested domain name is not accessible with this api_key; try using its ID if known.") + module.exit_json(changed=False, result=domain_records) + + # Fetch existing record + Build new one + current_record = DME.getRecordByName(record_name) + new_record = {'name': record_name} + for i in ["record_value", "record_type", "record_ttl"]: + if module.params[i]: + new_record[i[len("record_"):]] = module.params[i] + + # Compare new record against existing one + changed = False + if current_record: + for i in new_record: + if str(current_record[i]) != str(new_record[i]): + changed = True + new_record['id'] = str(current_record['id']) + + # Follow Keyword Controlled Behavior + if state == 'present': + # return the record if no value is specified + if not "value" in new_record: + if not current_record: + module.fail_json( + msg="A record with name '%s' does not exist for domain '%s.'" % (record_name, domain)) + module.exit_json(changed=False, result=current_record) + + # create record as it does not exist + if not current_record: + record = DME.createRecord(DME.prepareRecord(new_record)) + module.exit_json(changed=True, result=record) + + # update the record + if changed: + DME.updateRecord( + current_record['id'], DME.prepareRecord(new_record)) + module.exit_json(changed=True, result=new_record) + + # return the record (no changes) + module.exit_json(changed=False, result=current_record) + + elif state == 'absent': + # delete the record if it exists + if current_record: + 
DME.deleteRecord(current_record['id']) + module.exit_json(changed=True) + + # record does not exist, return w/o change. + module.exit_json(changed=False) + + else: + module.fail_json( + msg="'%s' is an unknown value for the state argument" % state) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/lldp b/lib/ansible/modules/extras/net_infrastructure/lldp new file mode 100755 index 00000000000..6b8836852f6 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/lldp @@ -0,0 +1,83 @@ +#!/usr/bin/python -tt +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import subprocess + +DOCUMENTATION = ''' +--- +module: lldp +version_added: 1.6 +short_description: get details reported by lldp +description: + - Reads data out of lldpctl +options: {} +author: Andy Hill +notes: + - Requires lldpd running and lldp enabled on switches +''' + +EXAMPLES = ''' +# Retrieve switch/port information + - name: Gather information from lldp + lldp: + + - name: Print each switch/port + debug: msg="{{ lldp[item]['chassis']['name'] }} / {{ lldp[item]['port']['ifalias'] }} + with_items: lldp.keys() + +# TASK: [Print each switch/port] *********************************************************** +# ok: [10.13.0.22] => (item=eth2) => {"item": "eth2", "msg": "switch1.example.com / Gi0/24"} +# ok: [10.13.0.22] => (item=eth1) => {"item": "eth1", "msg": "switch2.example.com / Gi0/3"} +# ok: [10.13.0.22] => (item=eth0) => {"item": "eth0", "msg": "switch3.example.com / Gi0/3"} + +''' + +def gather_lldp(): + cmd = ['lldpctl', '-f', 'keyvalue'] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE) + (output, err) = proc.communicate() + if output: + output_dict = {} + lldp_entries = output.split("\n") + + for entry in lldp_entries: + if entry: + path, value = entry.strip().split("=", 1) + path = path.split(".") + path_components, final = path[:-1], path[-1] + + current_dict = output_dict + for path_component in path_components: + current_dict[path_component] = current_dict.get(path_component, {}) + current_dict = current_dict[path_component] + current_dict[final] = value + return output_dict + + +def main(): + module = AnsibleModule({}) + + lldp_output = gather_lldp() + try: + data = {'lldp': lldp_output['lldp']} + module.exit_json(ansible_facts=data) + except TypeError: + module.fail_json(msg="lldpctl command failed. 
is lldpd running?") + +# import module snippets +from ansible.module_utils.basic import * +main() + diff --git a/lib/ansible/modules/extras/net_infrastructure/netscaler b/lib/ansible/modules/extras/net_infrastructure/netscaler new file mode 100644 index 00000000000..de3c8fc2421 --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/netscaler @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage Citrix NetScaler entities +(c) 2013, Nandor Sivok + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: netscaler +version_added: "1.1" +short_description: Manages Citrix NetScaler entities +description: + - Manages Citrix NetScaler server and service entities. +options: + nsc_host: + description: + - hostname or ip of your netscaler + required: true + default: null + aliases: [] + nsc_protocol: + description: + - protocol used to access netscaler + required: false + default: https + aliases: [] + user: + description: + - username + required: true + default: null + aliases: [] + password: + description: + - password + required: true + default: null + aliases: [] + action: + description: + - the action you want to perform on the entity + required: false + default: disable + choices: ["enable", "disable"] + aliases: [] + name: + description: + - name of the entity + required: true + default: hostname + aliases: [] + type: + description: + - type of the entity + required: false + default: server + choices: ["server", "service"] + aliases: [] + validate_certs: + description: + - If C(no), SSL certificates for the target url will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + +requirements: [ "urllib", "urllib2" ] +author: Nandor Sivok +''' + +EXAMPLES = ''' +# Disable the server +ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass" + +# Enable the server +ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass action=enable" + +# Disable the service local:8080 +ansible host -m netscaler -a "nsc_host=nsc.example.com user=apiuser password=apipass name=local:8080 type=service action=disable" +''' + + +import json +import base64 +import socket + + +class netscaler(object): + + _nitro_base_url = '/nitro/v1/' + + def __init__(self, module): + self.module = module + + def http_request(self, api_endpoint, data_json={}): + request_url = self._nsc_protocol + '://' + self._nsc_host + self._nitro_base_url + api_endpoint + + data_json = urllib.urlencode(data_json) + if not len(data_json): + data_json = None + + auth = base64.encodestring('%s:%s' % (self._nsc_user, self._nsc_pass)).replace('\n', '').strip() + headers = { + 'Authorization': 'Basic %s' % auth, + 'Content-Type' : 'application/x-www-form-urlencoded', + } + + response, info = fetch_url(self.module, request_url, data=data_json, headers=headers) + + return json.load(response) + + def prepare_request(self, action): + resp = self.http_request( + 'config', + { + "object": + { + "params": {"action": action}, + self._type: {"name": self._name} + } + } + ) + + return resp + + +def core(module): + n = netscaler(module) + n._nsc_host = module.params.get('nsc_host') + n._nsc_user = module.params.get('user') + n._nsc_pass = module.params.get('password') + n._nsc_protocol = module.params.get('nsc_protocol') + n._name = module.params.get('name') + n._type = module.params.get('type') + action = module.params.get('action') + + r = n.prepare_request(action) + + return r['errorcode'], r + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + nsc_host = dict(required=True), + nsc_protocol = dict(default='https'), + user = dict(required=True), + password = dict(required=True), + action = dict(default='enable', choices=['enable','disable']), + name = dict(default=socket.gethostname()), + type = dict(default='server', choices=['service', 'server']), + validate_certs=dict(default='yes', type='bool'), + ) + ) + + rc = 0 + try: + rc, result = core(module) + except Exception, e: + module.fail_json(msg=str(e)) + + if rc != 0: + module.fail_json(rc=rc, msg=result) + else: + result['changed'] = True + module.exit_json(**result) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/openvswitch_bridge b/lib/ansible/modules/extras/net_infrastructure/openvswitch_bridge new file mode 100644 index 00000000000..551ca707a2d --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/openvswitch_bridge @@ -0,0 +1,135 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, David Stygstra +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: openvswitch_bridge +version_added: 1.4 +author: David Stygstra +short_description: Manage Open vSwitch bridges +requirements: [ ovs-vsctl ] +description: + - Manage Open vSwitch bridges +options: + bridge: + required: true + description: + - Name of bridge to manage + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the bridge should exist + timeout: + required: false + default: 5 + description: + - How long to wait for ovs-vswitchd to respond +''' + +EXAMPLES = ''' +# Create a bridge named br-int +- openvswitch_bridge: bridge=br-int state=present +''' + + +class OVSBridge(object): + def __init__(self, module): + self.module = module + self.bridge = module.params['bridge'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + + def _vsctl(self, command): + '''Run ovs-vsctl command''' + return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) + + def exists(self): + '''Check if the bridge already exists''' + rc, _, err = self._vsctl(['br-exists', self.bridge]) + if rc == 0: # See ovs-vsctl(8) for status codes + return True + if rc == 2: + return False + raise Exception(err) + + def add(self): + '''Create the bridge''' + rc, _, err = self._vsctl(['add-br', self.bridge]) + if rc != 0: + raise Exception(err) + + def delete(self): + '''Delete the bridge''' + rc, _, err = self._vsctl(['del-br', self.bridge]) + if rc != 0: + raise Exception(err) + + def check(self): + '''Run check mode''' + try: + if self.state == 'absent' and self.exists(): + changed = True + elif self.state == 'present' and not self.exists(): + changed = True + else: + changed = False + except Exception, e: + self.module.fail_json(msg=str(e)) + self.module.exit_json(changed=changed) + + def run(self): + '''Make the necessary changes''' + changed = False + try: + if self.state == 'absent': + if self.exists(): + self.delete() + changed = True + elif self.state == 'present': + if not self.exists(): + self.add() + changed = True + except Exception, e: + self.module.fail_json(msg=str(e)) + self.module.exit_json(changed=changed) + + +def main(): + module = AnsibleModule( + argument_spec={ + 'bridge': {'required': True}, + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'timeout': {'default': 5, 'type': 'int'} + }, + supports_check_mode=True, + ) + + br = OVSBridge(module) + if module.check_mode: + br.check() + else: + br.run() + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/net_infrastructure/openvswitch_port b/lib/ansible/modules/extras/net_infrastructure/openvswitch_port new file mode 100644 index 00000000000..66391937d1b --- /dev/null +++ b/lib/ansible/modules/extras/net_infrastructure/openvswitch_port @@ -0,0 +1,139 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, David Stygstra +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
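The exists() method of the bridge class above relies on the documented exit codes of ovs-vsctl br-exists (0 when the bridge exists, 2 when it does not). A standalone sketch of the same probe using subprocess directly instead of module.run_command; the function name is illustrative and ovs-vsctl must be installed.

import subprocess

def bridge_exists(bridge, timeout=5):
    """Return True/False based on ovs-vsctl br-exists exit codes; see ovs-vsctl(8)."""
    proc = subprocess.Popen(['ovs-vsctl', '-t', str(timeout), 'br-exists', bridge],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    _, err = proc.communicate()
    if proc.returncode == 0:    # bridge exists
        return True
    if proc.returncode == 2:    # bridge does not exist
        return False
    raise RuntimeError(err)     # anything else is a real error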
+# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: openvswitch_port +version_added: 1.4 +author: David Stygstra +short_description: Manage Open vSwitch ports +requirements: [ ovs-vsctl ] +description: + - Manage Open vSwitch ports +options: + bridge: + required: true + description: + - Name of bridge to manage + port: + required: true + description: + - Name of port to manage on the bridge + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the port should exist + timeout: + required: false + default: 5 + description: + - How long to wait for ovs-vswitchd to respond +''' + +EXAMPLES = ''' +# Creates port eth2 on bridge br-ex +- openvswitch_port: bridge=br-ex port=eth2 state=present +''' + + +class OVSPort(object): + def __init__(self, module): + self.module = module + self.bridge = module.params['bridge'] + self.port = module.params['port'] + self.state = module.params['state'] + self.timeout = module.params['timeout'] + + def _vsctl(self, command): + '''Run ovs-vsctl command''' + return self.module.run_command(['ovs-vsctl', '-t', str(self.timeout)] + command) + + def exists(self): + '''Check if the port already exists''' + rc, out, err = self._vsctl(['list-ports', self.bridge]) + if rc != 0: + raise Exception(err) + return any(port.rstrip() == self.port for port in out.split('\n')) + + def add(self): + '''Add the port''' + rc, _, err = self._vsctl(['add-port', self.bridge, self.port]) + if rc != 0: + raise Exception(err) + + def delete(self): + '''Remove the port''' + rc, _, err = self._vsctl(['del-port', self.bridge, self.port]) + if rc != 0: + raise Exception(err) + + def check(self): + '''Run check mode''' + try: + if self.state == 'absent' and self.exists(): + changed = True + elif self.state == 'present' and not self.exists(): + changed = True + else: + changed = False + except Exception, e: + self.module.fail_json(msg=str(e)) + self.module.exit_json(changed=changed) + + def run(self): + '''Make the necessary changes''' + changed = False + try: + if self.state == 'absent': + if self.exists(): + self.delete() + changed = True + elif self.state == 'present': + if not self.exists(): + self.add() + changed = True + except Exception, e: + self.module.fail_json(msg=str(e)) + self.module.exit_json(changed=changed) + + +def main(): + module = AnsibleModule( + argument_spec={ + 'bridge': {'required': True}, + 'port': {'required': True}, + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'timeout': {'default': 5, 'type': 'int'} + }, + supports_check_mode=True, + ) + + port = OVSPort(module) + if module.check_mode: + port.check() + else: + port.run() + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/campfire b/lib/ansible/modules/extras/notification/campfire new file mode 100644 index 00000000000..31e69fc5459 --- /dev/null +++ b/lib/ansible/modules/extras/notification/campfire @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: campfire +version_added: "1.2" +short_description: Send a message to Campfire +description: + - Send a message to Campfire. 
+ - Messages with newlines will result in a "Paste" message being sent. +version_added: "1.2" +options: + subscription: + description: + - The subscription name to use. + required: true + token: + description: + - API token. + required: true + room: + description: + - Room number to which the message should be sent. + required: true + msg: + description: + - The message body. + required: true + notify: + description: + - Send a notification sound before the message. + required: false + choices: ["56k", "bell", "bezos", "bueller", "clowntown", + "cottoneyejoe", "crickets", "dadgummit", "dangerzone", + "danielsan", "deeper", "drama", "greatjob", "greyjoy", + "guarantee", "heygirl", "horn", "horror", + "inconceivable", "live", "loggins", "makeitso", "noooo", + "nyan", "ohmy", "ohyeah", "pushit", "rimshot", + "rollout", "rumble", "sax", "secret", "sexyback", + "story", "tada", "tmyk", "trololo", "trombone", "unix", + "vuvuzela", "what", "whoomp", "yeah", "yodel"] + +# informational: requirements for nodes +requirements: [ urllib2, cgi ] +author: Adam Garside +''' + +EXAMPLES = ''' +- campfire: subscription=foo token=12345 room=123 msg="Task completed." + +- campfire: subscription=foo token=12345 room=123 notify=loggins + msg="Task completed ... with feeling." +''' + + +def main(): + + try: + import urllib2 + except ImportError: + module.fail_json(msg="urllib2 is required") + + try: + import cgi + except ImportError: + module.fail_json(msg="cgi is required") + + module = AnsibleModule( + argument_spec=dict( + subscription=dict(required=True), + token=dict(required=True), + room=dict(required=True), + msg=dict(required=True), + notify=dict(required=False, + choices=["56k", "bell", "bezos", "bueller", + "clowntown", "cottoneyejoe", + "crickets", "dadgummit", "dangerzone", + "danielsan", "deeper", "drama", + "greatjob", "greyjoy", "guarantee", + "heygirl", "horn", "horror", + "inconceivable", "live", "loggins", + "makeitso", "noooo", "nyan", "ohmy", + "ohyeah", "pushit", "rimshot", + "rollout", "rumble", "sax", "secret", + "sexyback", "story", "tada", "tmyk", + "trololo", "trombone", "unix", + "vuvuzela", "what", "whoomp", "yeah", + "yodel"]), + ), + supports_check_mode=False + ) + + subscription = module.params["subscription"] + token = module.params["token"] + room = module.params["room"] + msg = module.params["msg"] + notify = module.params["notify"] + + URI = "https://%s.campfirenow.com" % subscription + NSTR = "SoundMessage%s" + MSTR = "%s" + AGENT = "Ansible/1.2" + + try: + + # Setup basic auth using token as the username + pm = urllib2.HTTPPasswordMgrWithDefaultRealm() + pm.add_password(None, URI, token, 'X') + + # Setup Handler and define the opener for the request + handler = urllib2.HTTPBasicAuthHandler(pm) + opener = urllib2.build_opener(handler) + + target_url = '%s/room/%s/speak.xml' % (URI, room) + + # Send some audible notification if requested + if notify: + req = urllib2.Request(target_url, NSTR % cgi.escape(notify)) + req.add_header('Content-Type', 'application/xml') + req.add_header('User-agent', AGENT) + response = opener.open(req) + + # Send the message + req = urllib2.Request(target_url, MSTR % cgi.escape(msg)) + req.add_header('Content-Type', 'application/xml') + req.add_header('User-agent', AGENT) + response = opener.open(req) + + except urllib2.HTTPError, e: + if not (200 <= e.code < 300): + module.fail_json(msg="unable to send msg: '%s', campfire api" + " returned error code: '%s'" % + (msg, e.code)) + + except Exception, e: + module.fail_json(msg="unable to send msg: %s" 
% msg) + + module.exit_json(changed=True, room=room, msg=msg, notify=notify) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/flowdock b/lib/ansible/modules/extras/notification/flowdock new file mode 100644 index 00000000000..009487fb438 --- /dev/null +++ b/lib/ansible/modules/extras/notification/flowdock @@ -0,0 +1,192 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2013 Matt Coddington +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: flowdock +version_added: "1.2" +author: Matt Coddington +short_description: Send a message to a flowdock +description: + - Send a message to a flowdock team inbox or chat using the push API (see https://www.flowdock.com/api/team-inbox and https://www.flowdock.com/api/chat) +options: + token: + description: + - API token. + required: true + type: + description: + - Whether to post to 'inbox' or 'chat' + required: true + choices: [ "inbox", "chat" ] + msg: + description: + - Content of the message + required: true + tags: + description: + - tags of the message, separated by commas + required: false + external_user_name: + description: + - (chat only - required) Name of the "user" sending the message + required: false + from_address: + description: + - (inbox only - required) Email address of the message sender + required: false + source: + description: + - (inbox only - required) Human readable identifier of the application that uses the Flowdock API + required: false + subject: + description: + - (inbox only - required) Subject line of the message + required: false + from_name: + description: + - (inbox only) Name of the message sender + required: false + reply_to: + description: + - (inbox only) Email address for replies + required: false + project: + description: + - (inbox only) Human readable identifier for more detailed message categorization + required: false + link: + description: + - (inbox only) Link associated with the message. This will be used to link the message subject in Team Inbox. + required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +''' + +EXAMPLES = ''' +- flowdock: type=inbox + token=AAAAAA + from_address=user@example.com + source='my cool app' + msg='test from ansible' + subject='test subject' + +- flowdock: type=chat + token=AAAAAA + external_user_name=testuser + msg='test from ansible' + tags=tag1,tag2,tag3 +''' + +# =========================================== +# Module execution. 
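The flowdock options documented above are conditionally required: 'inbox' messages need from_address, source and subject, while 'chat' messages need external_user_name. A small sketch of that validation as a plain function; the names and return value are illustrative, not the module's exact behaviour.

INBOX_REQUIRED = ('from_address', 'source', 'subject')
CHAT_REQUIRED = ('external_user_name',)

def missing_params(msg_type, params):
    """Return the conditionally required parameter names absent for this message type."""
    required = INBOX_REQUIRED if msg_type == 'inbox' else CHAT_REQUIRED
    return [name for name in required if not params.get(name)]

# e.g. missing_params('chat', {'token': 'AAAAAA', 'msg': 'hi'}) -> ['external_user_name']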
+# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + msg=dict(required=True), + type=dict(required=True, choices=["inbox","chat"]), + external_user_name=dict(required=False), + from_address=dict(required=False), + source=dict(required=False), + subject=dict(required=False), + from_name=dict(required=False), + reply_to=dict(required=False), + project=dict(required=False), + tags=dict(required=False), + link=dict(required=False), + validate_certs = dict(default='yes', type='bool'), + ), + supports_check_mode=True + ) + + type = module.params["type"] + token = module.params["token"] + if type == 'inbox': + url = "https://api.flowdock.com/v1/messages/team_inbox/%s" % (token) + else: + url = "https://api.flowdock.com/v1/messages/chat/%s" % (token) + + params = {} + + # required params + params['content'] = module.params["msg"] + + # required params for the 'chat' type + if module.params['external_user_name']: + if type == 'inbox': + module.fail_json(msg="external_user_name is not valid for the 'inbox' type") + else: + params['external_user_name'] = module.params["external_user_name"] + elif type == 'chat': + module.fail_json(msg="%s is required for the 'inbox' type" % item) + + # required params for the 'inbox' type + for item in [ 'from_address', 'source', 'subject' ]: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + elif type == 'inbox': + module.fail_json(msg="%s is required for the 'inbox' type" % item) + + # optional params + if module.params["tags"]: + params['tags'] = module.params["tags"] + + # optional params for the 'inbox' type + for item in [ 'from_name', 'reply_to', 'project', 'link' ]: + if module.params[item]: + if type == 'chat': + module.fail_json(msg="%s is not valid for the 'chat' type" % item) + else: + params[item] = module.params[item] + + # If we're in check mode, just exit pretending like we succeeded + if module.check_mode: + module.exit_json(changed=False) + + # Send the data to Flowdock + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] != 200: + module.fail_json(msg="unable to send msg: %s" % info['msg']) + + module.exit_json(changed=True, msg=module.params["msg"]) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() + diff --git a/lib/ansible/modules/extras/notification/grove b/lib/ansible/modules/extras/notification/grove new file mode 100644 index 00000000000..e6bf241bdaa --- /dev/null +++ b/lib/ansible/modules/extras/notification/grove @@ -0,0 +1,99 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: grove +version_added: 1.4 +short_description: Sends a notification to a grove.io channel +description: + - The M(grove) module sends a message for a service to a Grove.io + channel. +options: + channel_token: + description: + - Token of the channel to post to. + required: true + service: + description: + - Name of the service (displayed as the "user" in the message) + required: false + default: ansible + message: + description: + - Message content + required: true + url: + description: + - Service URL for the web client + required: false + icon_url: + description: + - Icon for the service + required: false + validate_certs: + description: + - If C(no), SSL certificates will not be validated. 
This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 +author: Jonas Pfenniger +''' + +EXAMPLES = ''' +- grove: > + channel_token=6Ph62VBBJOccmtTPZbubiPzdrhipZXtg + service=my-app + message=deployed {{ target }} +''' + +BASE_URL = 'https://grove.io/api/notice/%s/' + +# ============================================================== +# do_notify_grove + +def do_notify_grove(module, channel_token, service, message, url=None, icon_url=None): + my_url = BASE_URL % (channel_token,) + + my_data = dict(service=service, message=message) + if url is not None: + my_data['url'] = url + if icon_url is not None: + my_data['icon_url'] = icon_url + + data = urllib.urlencode(my_data) + response, info = fetch_url(module, my_url, data=data) + if info['status'] != 200: + module.fail_json(msg="failed to send notification: %s" % info['msg']) + +# ============================================================== +# main + +def main(): + module = AnsibleModule( + argument_spec = dict( + channel_token = dict(type='str', required=True), + message = dict(type='str', required=True), + service = dict(type='str', default='ansible'), + url = dict(type='str', default=None), + icon_url = dict(type='str', default=None), + validate_certs = dict(default='yes', type='bool'), + ) + ) + + channel_token = module.params['channel_token'] + service = module.params['service'] + message = module.params['message'] + url = module.params['url'] + icon_url = module.params['icon_url'] + + do_notify_grove(module, channel_token, service, message, url, icon_url) + + # Mission complete + module.exit_json(msg="OK") + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/hipchat b/lib/ansible/modules/extras/notification/hipchat new file mode 100644 index 00000000000..4ff95b32bf6 --- /dev/null +++ b/lib/ansible/modules/extras/notification/hipchat @@ -0,0 +1,149 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: hipchat +version_added: "1.2" +short_description: Send a message to hipchat +description: + - Send a message to hipchat +options: + token: + description: + - API token. + required: true + room: + description: + - ID or name of the room. + required: true + from: + description: + - Name the message will appear be sent from. max 15 characters. + Over 15, will be shorten. + required: false + default: Ansible + msg: + description: + - The message body. + required: true + default: null + color: + description: + - Background color for the message. Default is yellow. + required: false + default: yellow + choices: [ "yellow", "red", "green", "purple", "gray", "random" ] + msg_format: + description: + - message format. html or text. Default is text. + required: false + default: text + choices: [ "text", "html" ] + notify: + description: + - notify or not (change the tab color, play a sound, etc) + required: false + default: 'yes' + choices: [ "yes", "no" ] + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: ['yes', 'no'] + version_added: 1.5.1 + api: + description: + - API url if using a self-hosted hipchat server + required: false + default: 'https://api.hipchat.com/v1/rooms/message' + version_added: 1.6.0 + + +# informational: requirements for nodes +requirements: [ urllib, urllib2 ] +author: WAKAYAMA Shirou +''' + +EXAMPLES = ''' +- hipchat: token=AAAAAA room=notify msg="Ansible task finished" +''' + +# =========================================== +# HipChat module specific support methods. +# + +MSG_URI = "https://api.hipchat.com/v1/rooms/message" + +def send_msg(module, token, room, msg_from, msg, msg_format='text', + color='yellow', notify=False, api=MSG_URI): + '''sending message to hipchat''' + + params = {} + params['room_id'] = room + params['from'] = msg_from[:15] # max length is 15 + params['message'] = msg + params['message_format'] = msg_format + params['color'] = color + params['api'] = api + + if notify: + params['notify'] = 1 + else: + params['notify'] = 0 + + url = api + "?auth_token=%s" % (token) + data = urllib.urlencode(params) + response, info = fetch_url(module, url, data=data) + if info['status'] == 200: + return response.read() + else: + module.fail_json(msg="failed to send message, return status=%s" % str(info['status'])) + + +# =========================================== +# Module execution. +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + token=dict(required=True), + room=dict(required=True), + msg=dict(required=True), + msg_from=dict(default="Ansible", aliases=['from']), + color=dict(default="yellow", choices=["yellow", "red", "green", + "purple", "gray", "random"]), + msg_format=dict(default="text", choices=["text", "html"]), + notify=dict(default=True, type='bool'), + validate_certs = dict(default='yes', type='bool'), + api = dict(default=MSG_URI), + ), + supports_check_mode=True + ) + + token = module.params["token"] + room = module.params["room"] + msg = module.params["msg"] + msg_from = module.params["msg_from"] + color = module.params["color"] + msg_format = module.params["msg_format"] + notify = module.params["notify"] + api = module.params["api"] + + try: + send_msg(module, token, room, msg_from, msg, msg_format, color, notify, api) + except Exception, e: + module.fail_json(msg="unable to sent msg: %s" % e) + + changed = True + module.exit_json(changed=changed, room=room, msg_from=msg_from, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/notification/irc b/lib/ansible/modules/extras/notification/irc new file mode 100644 index 00000000000..a90834f820d --- /dev/null +++ b/lib/ansible/modules/extras/notification/irc @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
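+# Editor's note: for orientation, a rough sketch of the raw IRC exchange that
+# send_msg() below performs over a plain socket (PASS is only sent when a
+# server password is supplied; server and channel names here are placeholders):
+#
+#   PASS secret
+#   NICK ansible
+#   USER ansible ansible ansible :ansible IRC
+#   ... wait for the 001-004 welcome numerics (and pick up the real nick) ...
+#   JOIN #t1 [key]
+#   ... wait for the 366 end-of-names reply for the channel ...
+#   PRIVMSG #t1 :the message
+#   PART #t1
+#   QUIT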
+# + +DOCUMENTATION = ''' +--- +module: irc +version_added: "1.2" +short_description: Send a message to an IRC channel +description: + - Send a message to an IRC channel. This is a very simplistic implementation. +options: + server: + description: + - IRC server name/address + required: false + default: localhost + port: + description: + - IRC server port number + required: false + default: 6667 + nick: + description: + - Nickname. May be shortened, depending on server's NICKLEN setting. + required: false + default: ansible + msg: + description: + - The message body. + required: true + default: null + color: + description: + - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none"). + required: false + default: "none" + choices: [ "none", "yellow", "red", "green", "blue", "black" ] + channel: + description: + - Channel name + required: true + key: + description: + - Channel key + required: false + version_added: 1.7 + passwd: + description: + - Server password + required: false + timeout: + description: + - Timeout to use while waiting for successful registration and join + messages, this is to prevent an endless loop + default: 30 + version_added: 1.5 + use_ssl: + description: + - Designates whether TLS/SSL should be used when connecting to the IRC server + default: False + version_added: 1.8 + +# informational: requirements for nodes +requirements: [ socket ] +author: Jan-Piet Mens, Matt Martz +''' + +EXAMPLES = ''' +- irc: server=irc.example.net channel="#t1" msg="Hello world" + +- local_action: irc port=6669 + channel="#t1" + msg="All finished at {{ ansible_date_time.iso8601 }}" + color=red + nick=ansibleIRC +''' + +# =========================================== +# IRC module support methods. +# + +import re +import socket +import ssl + +from time import sleep + + +def send_msg(channel, msg, server='localhost', port='6667', key=None, + nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False): + '''send message to IRC''' + + colornumbers = { + 'black': "01", + 'red': "04", + 'green': "09", + 'yellow': "08", + 'blue': "12", + } + + try: + colornumber = colornumbers[color] + colortext = "\x03" + colornumber + except: + colortext = "" + + message = colortext + msg + + irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if use_ssl: + irc = ssl.wrap_socket(irc) + irc.connect((server, int(port))) + if passwd: + irc.send('PASS %s\r\n' % passwd) + irc.send('NICK %s\r\n' % nick) + irc.send('USER %s %s %s :ansible IRC\r\n' % (nick, nick, nick)) + motd = '' + start = time.time() + while 1: + motd += irc.recv(1024) + # The server might send back a shorter nick than we specified (due to NICKLEN), + # so grab that and use it from now on (assuming we find the 00[1-4] response). 
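+        # Editor's note: numerics 001-004 (RPL_WELCOME..RPL_MYINFO) carry the
+        # nick the server actually registered, e.g. a line such as
+        #   :irc.example.net 001 ansible42 :Welcome ...
+        # (hypothetical output); the regex below captures that nick so the
+        # later 366 JOIN check still matches even if the server shortened it.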
+ match = re.search('^:\S+ 00[1-4] (?P\S+) :', motd, flags=re.M) + if match: + nick = match.group('nick') + break + elif time.time() - start > timeout: + raise Exception('Timeout waiting for IRC server welcome response') + sleep(0.5) + + if key: + irc.send('JOIN %s %s\r\n' % (channel, key)) + else: + irc.send('JOIN %s\r\n' % channel) + + join = '' + start = time.time() + while 1: + join += irc.recv(1024) + if re.search('^:\S+ 366 %s %s :' % (nick, channel), join, flags=re.M): + break + elif time.time() - start > timeout: + raise Exception('Timeout waiting for IRC JOIN response') + sleep(0.5) + + irc.send('PRIVMSG %s :%s\r\n' % (channel, message)) + sleep(1) + irc.send('PART %s\r\n' % channel) + irc.send('QUIT\r\n') + sleep(1) + irc.close() + +# =========================================== +# Main +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + server=dict(default='localhost'), + port=dict(default=6667), + nick=dict(default='ansible'), + msg=dict(required=True), + color=dict(default="none", choices=["yellow", "red", "green", + "blue", "black", "none"]), + channel=dict(required=True), + key=dict(), + passwd=dict(), + timeout=dict(type='int', default=30), + use_ssl=dict(type='bool', default=False) + ), + supports_check_mode=True + ) + + server = module.params["server"] + port = module.params["port"] + nick = module.params["nick"] + msg = module.params["msg"] + color = module.params["color"] + channel = module.params["channel"] + key = module.params["key"] + passwd = module.params["passwd"] + timeout = module.params["timeout"] + use_ssl = module.params["use_ssl"] + + try: + send_msg(channel, msg, server, port, key, nick, color, passwd, timeout, use_ssl) + except Exception, e: + module.fail_json(msg="unable to send to IRC: %s" % e) + + module.exit_json(changed=False, channel=channel, nick=nick, + msg=msg) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/jabber b/lib/ansible/modules/extras/notification/jabber new file mode 100644 index 00000000000..8a7eed37b33 --- /dev/null +++ b/lib/ansible/modules/extras/notification/jabber @@ -0,0 +1,146 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +version_added: "1.2" +module: jabber +short_description: Send a message to jabber user or chat room +description: + - Send a message to jabber +options: + user: + description: + User as which to connect + required: true + password: + description: + password for user to connect + required: true + to: + description: + user ID or name of the room, when using room use a slash to indicate your nick. + required: true + msg: + description: + - The message body. 
+ required: true + default: null + host: + description: + host to connect, overrides user info + required: false + port: + description: + port to connect to, overrides default + required: false + default: 5222 + encoding: + description: + message encoding + required: false + +# informational: requirements for nodes +requirements: [ xmpp ] +author: Brian Coca +''' + +EXAMPLES = ''' +# send a message to a user +- jabber: user=mybot@example.net + password=secret + to=friend@example.net + msg="Ansible task finished" + +# send a message to a room +- jabber: user=mybot@example.net + password=secret + to=mychaps@conference.example.net/ansiblebot + msg="Ansible task finished" + +# send a message, specifying the host and port +- jabber user=mybot@example.net + host=talk.example.net + port=5223 + password=secret + to=mychaps@example.net + msg="Ansible task finished" +''' + +import os +import re +import time + +HAS_XMPP = True +try: + import xmpp +except ImportError: + HAS_XMPP = False + +def main(): + + module = AnsibleModule( + argument_spec=dict( + user=dict(required=True), + password=dict(required=True), + to=dict(required=True), + msg=dict(required=True), + host=dict(required=False), + port=dict(required=False,default=5222), + encoding=dict(required=False), + ), + supports_check_mode=True + ) + + if not HAS_XMPP: + module.fail_json(msg="xmpp is not installed") + + jid = xmpp.JID(module.params['user']) + user = jid.getNode() + server = jid.getDomain() + port = module.params['port'] + password = module.params['password'] + try: + to, nick = module.params['to'].split('/', 1) + except ValueError: + to, nick = module.params['to'], None + + if module.params['host']: + host = module.params['host'] + else: + host = server + if module.params['encoding']: + xmpp.simplexml.ENCODING = params['encoding'] + + msg = xmpp.protocol.Message(body=module.params['msg']) + + try: + conn=xmpp.Client(server) + if not conn.connect(server=(host,port)): + module.fail_json(rc=1, msg='Failed to connect to server: %s' % (server)) + if not conn.auth(user,password,'Ansible'): + module.fail_json(rc=1, msg='Failed to authorize %s on: %s' % (user,server)) + # some old servers require this, also the sleep following send + conn.sendInitPresence(requestRoster=0) + + if nick: # sending to room instead of user, need to join + msg.setType('groupchat') + msg.setTag('x', namespace='http://jabber.org/protocol/muc#user') + conn.send(xmpp.Presence(to=module.params['to'])) + time.sleep(1) + else: + msg.setType('chat') + + msg.setTo(to) + if not module.check_mode: + conn.send(msg) + time.sleep(1) + conn.disconnect() + except Exception, e: + module.fail_json(msg="unable to send msg: %s" % e) + + module.exit_json(changed=False, to=to, user=user, msg=msg.getBody()) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/mail b/lib/ansible/modules/extras/notification/mail new file mode 100644 index 00000000000..34cd3a09bf3 --- /dev/null +++ b/lib/ansible/modules/extras/notification/mail @@ -0,0 +1,252 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright 2012 Dag Wieers +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +--- +author: Dag Wieers +module: mail +short_description: Send an email +description: + - This module is useful for sending emails from playbooks. + - One may wonder why automate sending emails? In complex environments + there are from time to time processes that cannot be automated, either + because you lack the authority to make it so, or because not everyone + agrees to a common approach. + - If you cannot automate a specific step, but the step is non-blocking, + sending out an email to the responsible party to make him perform his + part of the bargain is an elegant way to put the responsibility in + someone else's lap. + - Of course sending out a mail can be equally useful as a way to notify + one or more people in a team that a specific action has been + (successfully) taken. +version_added: "0.8" +options: + from: + description: + - The email-address the mail is sent from. May contain address and phrase. + default: root + required: false + to: + description: + - The email-address(es) the mail is being sent to. This is + a comma-separated list, which may contain address and phrase portions. + default: root + required: false + cc: + description: + - The email-address(es) the mail is being copied to. This is + a comma-separated list, which may contain address and phrase portions. + required: false + bcc: + description: + - The email-address(es) the mail is being 'blind' copied to. This is + a comma-separated list, which may contain address and phrase portions. + required: false + subject: + description: + - The subject of the email being sent. + aliases: [ msg ] + required: true + body: + description: + - The body of the email being sent. + default: $subject + required: false + host: + description: + - The mail server + default: 'localhost' + required: false + port: + description: + - The mail server port + default: '25' + required: false + version_added: "1.0" + attach: + description: + - A space-separated list of pathnames of files to attach to the message. + Attached files will have their content-type set to C(application/octet-stream). + default: null + required: false + version_added: "1.0" + headers: + description: + - A vertical-bar-separated list of headers which should be added to the message. + Each individual header is specified as C(header=value) (see example below). + default: null + required: false + version_added: "1.0" + charset: + description: + - The character set of email being sent + default: 'us-ascii' + required: false +""" + +EXAMPLES = ''' +# Example playbook sending mail to root +- local_action: mail msg='System {{ ansible_hostname }} has been successfully provisioned.' + +# Send e-mail to a bunch of users, attaching files +- local_action: mail + host='127.0.0.1' + port=2025 + subject="Ansible-report" + body="Hello, this is an e-mail. 
I hope you like it ;-)" + from="jane@example.net (Jane Jolie)" + to="John Doe , Suzie Something " + cc="Charlie Root " + attach="/etc/group /tmp/pavatar2.png" + headers=Reply-To=john@example.com|X-Special="Something or other" + charset=utf8 +''' + +import os +import sys +import smtplib + +try: + from email import encoders + import email.utils + from email.utils import parseaddr, formataddr + from email.mime.base import MIMEBase + from mail.mime.multipart import MIMEMultipart + from email.mime.text import MIMEText +except ImportError: + from email import Encoders as encoders + import email.Utils + from email.Utils import parseaddr, formataddr + from email.MIMEBase import MIMEBase + from email.MIMEMultipart import MIMEMultipart + from email.MIMEText import MIMEText + +def main(): + + module = AnsibleModule( + argument_spec = dict( + host = dict(default='localhost'), + port = dict(default='25'), + sender = dict(default='root', aliases=['from']), + to = dict(default='root', aliases=['recipients']), + cc = dict(default=None), + bcc = dict(default=None), + subject = dict(required=True, aliases=['msg']), + body = dict(default=None), + attach = dict(default=None), + headers = dict(default=None), + charset = dict(default='us-ascii') + ) + ) + + host = module.params.get('host') + port = module.params.get('port') + sender = module.params.get('sender') + recipients = module.params.get('to') + copies = module.params.get('cc') + blindcopies = module.params.get('bcc') + subject = module.params.get('subject') + body = module.params.get('body') + attach_files = module.params.get('attach') + headers = module.params.get('headers') + charset = module.params.get('charset') + + sender_phrase, sender_addr = parseaddr(sender) + + if not body: + body = subject + + try: + smtp = smtplib.SMTP(host, port=int(port)) + except Exception, e: + module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e)) + + + msg = MIMEMultipart() + msg['Subject'] = subject + msg['From'] = formataddr((sender_phrase, sender_addr)) + msg.preamble = "Multipart message" + + if headers is not None: + for hdr in [x.strip() for x in headers.split('|')]: + try: + h_key, h_val = hdr.split('=') + msg.add_header(h_key, h_val) + except: + pass + + if 'X-Mailer' not in msg: + msg.add_header('X-Mailer', "Ansible") + + to_list = [] + cc_list = [] + addr_list = [] + + if recipients is not None: + for addr in [x.strip() for x in recipients.split(',')]: + to_list.append( formataddr( parseaddr(addr)) ) + addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase + if copies is not None: + for addr in [x.strip() for x in copies.split(',')]: + cc_list.append( formataddr( parseaddr(addr)) ) + addr_list.append( parseaddr(addr)[1] ) # address only, w/o phrase + if blindcopies is not None: + for addr in [x.strip() for x in blindcopies.split(',')]: + addr_list.append( parseaddr(addr)[1] ) + + if len(to_list) > 0: + msg['To'] = ", ".join(to_list) + if len(cc_list) > 0: + msg['Cc'] = ", ".join(cc_list) + + part = MIMEText(body + "\n\n", _charset=charset) + msg.attach(part) + + if attach_files is not None: + for file in attach_files.split(): + try: + fp = open(file, 'rb') + + part = MIMEBase('application', 'octet-stream') + part.set_payload(fp.read()) + fp.close() + + encoders.encode_base64(part) + + part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file)) + msg.attach(part) + except Exception, e: + module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e)) + 
sys.exit() + + composed = msg.as_string() + + try: + smtp.sendmail(sender_addr, set(addr_list), composed) + except Exception, e: + module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e)) + + smtp.quit() + + module.exit_json(changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/mqtt b/lib/ansible/modules/extras/notification/mqtt new file mode 100644 index 00000000000..d701bd9348a --- /dev/null +++ b/lib/ansible/modules/extras/notification/mqtt @@ -0,0 +1,166 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, 2014, Jan-Piet Mens +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: mqtt +short_description: Publish a message on an MQTT topic for the IoT +version_added: "1.2" +description: + - Publish a message on an MQTT topic. +options: + server: + description: + - MQTT broker address/name + required: false + default: localhost + port: + description: + - MQTT broker port number + required: false + default: 1883 + username: + description: + - Username to authenticate against the broker. + required: false + password: + description: + - Password for C(username) to authenticate against the broker. + required: false + client_id: + description: + - MQTT client identifier + required: false + default: hostname + pid + topic: + description: + - MQTT topic name + required: true + default: null + payload: + description: + - Payload. The special string C("None") may be used to send a NULL + (i.e. empty) payload which is useful to simply notify with the I(topic) + or to clear previously retained messages. + required: true + default: null + qos: + description: + - QoS (Quality of Service) + required: false + default: 0 + choices: [ "0", "1", "2" ] + retain: + description: + - Setting this flag causes the broker to retain (i.e. keep) the message so that + applications that subsequently subscribe to the topic can received the last + retained message immediately. + required: false + default: False + +# informational: requirements for nodes +requirements: [ mosquitto ] +notes: + - This module requires a connection to an MQTT broker such as Mosquitto + U(http://mosquitto.org) and the I(Paho) C(mqtt) Python client (U(https://pypi.python.org/pypi/paho-mqtt)). +author: Jan-Piet Mens +''' + +EXAMPLES = ''' +- local_action: mqtt + topic=service/ansible/{{ ansible_hostname }} + payload="Hello at {{ ansible_date_time.iso8601 }}" + qos=0 + retain=false + client_id=ans001 +''' + +# =========================================== +# MQTT module support methods. 
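+# Editor's note: a minimal, hypothetical illustration of the same Paho helper
+# that main() below ends up calling -- paho.mqtt.publish.single() connects,
+# publishes one message and disconnects; the host, topic, client id and
+# credentials shown here are placeholders only.
+#
+#   import paho.mqtt.publish as mqtt
+#   mqtt.single("service/ansible/web01", "hello",
+#               qos=0, retain=False, client_id="ans001",
+#               hostname="localhost", port=1883,
+#               auth={'username': 'demo', 'password': 'secret'})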
+# + +HAS_PAHOMQTT = True +try: + import socket + import paho.mqtt.publish as mqtt +except ImportError: + HAS_PAHOMQTT = False + +# =========================================== +# Main +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + server = dict(default = 'localhost'), + port = dict(default = 1883), + topic = dict(required = True), + payload = dict(required = True), + client_id = dict(default = None), + qos = dict(default="0", choices=["0", "1", "2"]), + retain = dict(default=False, type='bool'), + username = dict(default = None), + password = dict(default = None), + ), + supports_check_mode=True + ) + + if not HAS_PAHOMQTT: + module.fail_json(msg="Paho MQTT is not installed") + + server = module.params.get("server", 'localhost') + port = module.params.get("port", 1883) + topic = module.params.get("topic") + payload = module.params.get("payload") + client_id = module.params.get("client_id", '') + qos = int(module.params.get("qos", 0)) + retain = module.params.get("retain") + username = module.params.get("username", None) + password = module.params.get("password", None) + + if client_id is None: + client_id = "%s_%s" % (socket.getfqdn(), os.getpid()) + + if payload and payload == 'None': + payload = None + + auth=None + if username is not None: + auth = { 'username' : username, 'password' : password } + + try: + rc = mqtt.single(topic, payload, + qos=qos, + retain=retain, + client_id=client_id, + hostname=server, + port=port, + auth=auth) + except Exception, e: + module.fail_json(msg="unable to publish to MQTT broker %s" % (e)) + + module.exit_json(changed=False, topic=topic) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/nexmo b/lib/ansible/modules/extras/notification/nexmo new file mode 100644 index 00000000000..d4898c40cdb --- /dev/null +++ b/lib/ansible/modules/extras/notification/nexmo @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Matt Martz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: nexmo +short_description: Send a SMS via nexmo +description: + - Send a SMS message via nexmo +version_added: 1.6 +author: Matt Martz +options: + api_key: + description: + - Nexmo API Key + required: true + api_secret: + description: + - Nexmo API Secret + required: true + src: + description: + - Nexmo Number to send from + required: true + dest: + description: + - Phone number(s) to send SMS message to + required: true + msg: + description: + - Message to text to send. Messages longer than 160 characters will be + split into multiple messages + required: true + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: + - 'yes' + - 'no' +""" + +EXAMPLES = """ +- name: Send notification message via Nexmo + local_action: + module: nexmo + api_key: 640c8a53 + api_secret: 0ce239a6 + src: 12345678901 + dest: + - 10987654321 + - 16789012345 + msg: "{{ inventory_hostname }} completed" +""" + + +NEXMO_API = 'https://rest.nexmo.com/sms/json' + + +def send_msg(module): + failed = list() + responses = dict() + msg = { + 'api_key': module.params.get('api_key'), + 'api_secret': module.params.get('api_secret'), + 'from': module.params.get('src'), + 'text': module.params.get('msg') + } + for number in module.params.get('dest'): + msg['to'] = number + url = "%s?%s" % (NEXMO_API, urllib.urlencode(msg)) + + headers = dict(Accept='application/json') + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + failed.append(number) + responses[number] = dict(failed=True) + + try: + responses[number] = json.load(response) + except: + failed.append(number) + responses[number] = dict(failed=True) + else: + for message in responses[number]['messages']: + if int(message['status']) != 0: + failed.append(number) + responses[number] = dict(failed=True, **responses[number]) + + if failed: + msg = 'One or messages failed to send' + else: + msg = '' + + module.exit_json(failed=bool(failed), msg=msg, changed=False, + responses=responses) + + +def main(): + argument_spec = url_argument_spec() + argument_spec.update( + dict( + api_key=dict(required=True, no_log=True), + api_secret=dict(required=True, no_log=True), + src=dict(required=True, type='int'), + dest=dict(required=True, type='list'), + msg=dict(required=True), + ), + ) + + module = AnsibleModule( + argument_spec=argument_spec + ) + + send_msg(module) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/notification/osx_say b/lib/ansible/modules/extras/notification/osx_say new file mode 100644 index 00000000000..39e3da88c19 --- /dev/null +++ b/lib/ansible/modules/extras/notification/osx_say @@ -0,0 +1,74 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: osx_say +version_added: "1.2" +short_description: Makes an OSX computer to speak. +description: + - makes an OS computer speak! Amuse your friends, annoy your coworkers! +notes: + - If you like this module, you may also be interested in the osx_say callback in the plugins/ directory of the source checkout. 
+options: + msg: + description: + What to say + required: true + voice: + description: + What voice to use + required: false +requirements: [ say ] +author: Michael DeHaan +''' + +EXAMPLES = ''' +- local_action: osx_say msg="{{inventory_hostname}} is all done" voice=Zarvox +''' + +DEFAULT_VOICE='Trinoids' + +def say(module, msg, voice): + module.run_command(["/usr/bin/say", msg, "--voice=%s" % (voice)], check_rc=True) + +def main(): + + module = AnsibleModule( + argument_spec=dict( + msg=dict(required=True), + voice=dict(required=False, default=DEFAULT_VOICE), + ), + supports_check_mode=False + ) + + if not os.path.exists("/usr/bin/say"): + module.fail_json(msg="/usr/bin/say is not installed") + + msg = module.params['msg'] + voice = module.params['voice'] + + say(module, msg, voice) + + module.exit_json(msg=msg, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/notification/slack b/lib/ansible/modules/extras/notification/slack new file mode 100644 index 00000000000..176d6b338fb --- /dev/null +++ b/lib/ansible/modules/extras/notification/slack @@ -0,0 +1,173 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Ramon de la Fuente +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: slack +short_description: Send Slack notifications +description: + - The M(slack) module sends notifications to U(http://slack.com) via the Incoming WebHook integration +version_added: 1.6 +author: Ramon de la Fuente +options: + domain: + description: + - Slack (sub)domain for your environment without protocol. + (i.e. C(future500.slack.com)) + required: true + token: + description: + - Slack integration token + required: true + msg: + description: + - Message to send. + required: true + channel: + description: + - Channel to send the message to. If absent, the message goes to the channel selected for the I(token). + required: false + username: + description: + - This is the sender of the message. + required: false + default: ansible + icon_url: + description: + - Url for the message sender's icon (default C(http://www.ansible.com/favicon.ico)) + required: false + icon_emoji: + description: + - Emoji for the message sender. See Slack documentation for options. + (if I(icon_emoji) is set, I(icon_url) will not be used) + required: false + link_names: + description: + - Automatically create links for channels and usernames in I(msg). + required: false + default: 1 + choices: + - 1 + - 0 + parse: + description: + - Setting for the message parser at Slack + required: false + choices: + - 'full' + - 'none' + validate_certs: + description: + - If C(no), SSL certificates will not be validated. This should only be used + on personally controlled sites using self-signed certificates. 
+ required: false + default: 'yes' + choices: + - 'yes' + - 'no' +""" + +EXAMPLES = """ +- name: Send notification message via Slack + local_action: + module: slack + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} completed" + +- name: Send notification message via Slack all options + local_action: + module: slack + domain: future500.slack.com + token: thetokengeneratedbyslack + msg: "{{ inventory_hostname }} completed" + channel: "#ansible" + username: "Ansible on {{ inventory_hostname }}" + icon_url: "http://www.example.com/some-image-file.png" + link_names: 0 + parse: 'none' + +""" + + +SLACK_INCOMING_WEBHOOK = 'https://%s/services/hooks/incoming-webhook?token=%s' + +def build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse): + payload = dict(text=text) + + if channel is not None: + payload['channel'] = channel if (channel[0] == '#') else '#'+channel + if username is not None: + payload['username'] = username + if icon_emoji is not None: + payload['icon_emoji'] = icon_emoji + else: + payload['icon_url'] = icon_url + if link_names is not None: + payload['link_names'] = link_names + if parse is not None: + payload['parse'] = parse + + payload="payload=" + module.jsonify(payload) + return payload + +def do_notify_slack(module, domain, token, payload): + slack_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, token) + + response, info = fetch_url(module, slack_incoming_webhook, data=payload) + if info['status'] != 200: + obscured_incoming_webhook = SLACK_INCOMING_WEBHOOK % (domain, '[obscured]') + module.fail_json(msg=" failed to send %s to %s: %s" % (payload, obscured_incoming_webhook, info['msg'])) + +def main(): + module = AnsibleModule( + argument_spec = dict( + domain = dict(type='str', required=True), + token = dict(type='str', required=True), + msg = dict(type='str', required=True), + channel = dict(type='str', default=None), + username = dict(type='str', default='Ansible'), + icon_url = dict(type='str', default='http://www.ansible.com/favicon.ico'), + icon_emoji = dict(type='str', default=None), + link_names = dict(type='int', default=1, choices=[0,1]), + parse = dict(type='str', default=None, choices=['none', 'full']), + + validate_certs = dict(default='yes', type='bool'), + ) + ) + + domain = module.params['domain'] + token = module.params['token'] + text = module.params['msg'] + channel = module.params['channel'] + username = module.params['username'] + icon_url = module.params['icon_url'] + icon_emoji = module.params['icon_emoji'] + link_names = module.params['link_names'] + parse = module.params['parse'] + + payload = build_payload_for_slack(module, text, channel, username, icon_url, icon_emoji, link_names, parse) + do_notify_slack(module, domain, token, payload) + + module.exit_json(msg="OK") + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main() \ No newline at end of file diff --git a/lib/ansible/modules/extras/notification/sns b/lib/ansible/modules/extras/notification/sns new file mode 100644 index 00000000000..f2ed178554e --- /dev/null +++ b/lib/ansible/modules/extras/notification/sns @@ -0,0 +1,190 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Michael J. 
Schultz +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: sns +short_description: Send Amazon Simple Notification Service (SNS) messages +description: + - The M(sns) module sends notifications to a topic on your Amazon SNS account +version_added: 1.6 +author: Michael J. Schultz +options: + msg: + description: + - Default message to send. + required: true + aliases: [ "default" ] + subject: + description: + - Subject line for email delivery. + required: false + topic: + description: + - The topic you want to publish to. + required: true + email: + description: + - Message to send to email-only subscription + required: false + sqs: + description: + - Message to send to SQS-only subscription + required: false + sms: + description: + - Message to send to SMS-only subscription + required: false + http: + description: + - Message to send to HTTP-only subscription + required: false + https: + description: + - Message to send to HTTPS-only subscription + required: false + aws_secret_key: + description: + - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_secret_key', 'secret_key'] + aws_access_key: + description: + - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used. + required: false + default: None + aliases: ['ec2_access_key', 'access_key'] + region: + description: + - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used. + required: false + aliases: ['aws_region', 'ec2_region'] + +requirements: [ "boto" ] +author: Michael J. Schultz +""" + +EXAMPLES = """ +- name: Send default notification message via SNS + local_action: + module: sns + msg: "{{ inventory_hostname }} has completed the play." + subject: "Deploy complete!" + topic: "deploy" + +- name: Send notification messages via SNS with short message for SMS + local_action: + module: sns + msg: "{{ inventory_hostname }} has completed the play." + sms: "deployed!" + subject: "Deploy complete!" 
+ topic: "deploy" +""" + +import sys + +from ansible.module_utils.basic import * +from ansible.module_utils.ec2 import * + +try: + import boto + import boto.sns +except ImportError: + print "failed=True msg='boto required for this module'" + sys.exit(1) + + +def arn_topic_lookup(connection, short_topic): + response = connection.get_all_topics() + result = response[u'ListTopicsResponse'][u'ListTopicsResult'] + # topic names cannot have colons, so this captures the full topic name + lookup_topic = ':{}'.format(short_topic) + for topic in result[u'Topics']: + if topic[u'TopicArn'].endswith(lookup_topic): + return topic[u'TopicArn'] + return None + + +def main(): + argument_spec = ec2_argument_spec() + argument_spec.update( + dict( + msg=dict(type='str', required=True, aliases=['default']), + subject=dict(type='str', default=None), + topic=dict(type='str', required=True), + email=dict(type='str', default=None), + sqs=dict(type='str', default=None), + sms=dict(type='str', default=None), + http=dict(type='str', default=None), + https=dict(type='str', default=None), + ) + ) + + module = AnsibleModule(argument_spec=argument_spec) + + msg = module.params['msg'] + subject = module.params['subject'] + topic = module.params['topic'] + email = module.params['email'] + sqs = module.params['sqs'] + sms = module.params['sms'] + http = module.params['http'] + https = module.params['https'] + + region, ec2_url, aws_connect_params = get_aws_connection_info(module) + if not region: + module.fail_json(msg="region must be specified") + try: + connection = connect_to_aws(boto.sns, region, **aws_connect_params) + except boto.exception.NoAuthHandlerFound, e: + module.fail_json(msg=str(e)) + + # .publish() takes full ARN topic id, but I'm lazy and type shortnames + # so do a lookup (topics cannot contain ':', so thats the decider) + if ':' in topic: + arn_topic = topic + else: + arn_topic = arn_topic_lookup(connection, topic) + + if not arn_topic: + module.fail_json(msg='Could not find topic: {}'.format(topic)) + + dict_msg = {'default': msg} + if email: + dict_msg.update(email=email) + if sqs: + dict_msg.update(sqs=sqs) + if sms: + dict_msg.update(sms=sms) + if http: + dict_msg.update(http=http) + if https: + dict_msg.update(https=https) + + json_msg = json.dumps(dict_msg) + try: + connection.publish(topic=arn_topic, subject=subject, + message_structure='json', message=json_msg) + except boto.exception.BotoServerError, e: + module.fail_json(msg=str(e)) + + module.exit_json(msg="OK") + +main() diff --git a/lib/ansible/modules/extras/notification/twilio b/lib/ansible/modules/extras/notification/twilio new file mode 100644 index 00000000000..8969c28aa50 --- /dev/null +++ b/lib/ansible/modules/extras/notification/twilio @@ -0,0 +1,135 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Matt Makai +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
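+# Editor's note: a rough, hypothetical picture of the request that post_text()
+# below assembles -- a form-encoded POST to the Twilio REST API with HTTP Basic
+# auth (the account SID, auth token and phone numbers are placeholders):
+#
+#   POST https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXX/Messages.json
+#   Authorization: Basic base64("ACXXXXXXXX:auth_token")
+#   Content-type: application/x-www-form-urlencoded
+#
+#   From=%2B15552014545&To=%2B15553035681&Body=build+finished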
+ +DOCUMENTATION = ''' +--- +version_added: "1.6" +module: twilio +short_description: Sends a text message to a mobile phone through Twilio. +description: + - Sends a text message to a phone number through an the Twilio SMS service. +notes: + - Like the other notification modules, this one requires an external + dependency to work. In this case, you'll need a Twilio account with + a purchased or verified phone number to send the text message. +options: + account_sid: + description: + user's account id for Twilio found on the account page + required: true + auth_token: + description: user's authentication token for Twilio found on the account page + required: true + msg: + description: + the body of the text message + required: true + to_number: + description: + what phone number to send the text message to, format +15551112222 + required: true + from_number: + description: + what phone number to send the text message from, format +15551112222 + required: true + +requirements: [ urllib, urllib2 ] +author: Matt Makai +''' + +EXAMPLES = ''' +# send a text message from the local server about the build status to (555) 303 5681 +# note: you have to have purchased the 'from_number' on your Twilio account +- local_action: text msg="All servers with webserver role are now configured." + account_sid={{ twilio_account_sid }} + auth_token={{ twilio_auth_token }} + from_number=+15552014545 to_number=+15553035681 + +# send a text message from a server to (555) 111 3232 +# note: you have to have purchased the 'from_number' on your Twilio account +- text: msg="This server's configuration is now complete." + account_sid={{ twilio_account_sid }} + auth_token={{ twilio_auth_token }} + from_number=+15553258899 to_number=+15551113232 + +''' + +# ======================================= +# text module support methods +# +try: + import urllib, urllib2 +except ImportError: + module.fail_json(msg="urllib and urllib2 are required") + +import base64 + + +def post_text(module, account_sid, auth_token, msg, from_number, to_number): + URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ + % (account_sid,) + AGENT = "Ansible/1.5" + + data = {'From':from_number, 'To':to_number, 'Body':msg} + encoded_data = urllib.urlencode(data) + request = urllib2.Request(URI) + base64string = base64.encodestring('%s:%s' % \ + (account_sid, auth_token)).replace('\n', '') + request.add_header('User-Agent', AGENT) + request.add_header('Content-type', 'application/x-www-form-urlencoded') + request.add_header('Accept', 'application/ansible') + request.add_header('Authorization', 'Basic %s' % base64string) + return urllib2.urlopen(request, encoded_data) + + +# ======================================= +# Main +# + +def main(): + + module = AnsibleModule( + argument_spec=dict( + account_sid=dict(required=True), + auth_token=dict(required=True), + msg=dict(required=True), + from_number=dict(required=True), + to_number=dict(required=True), + ), + supports_check_mode=True + ) + + account_sid = module.params['account_sid'] + auth_token = module.params['auth_token'] + msg = module.params['msg'] + from_number = module.params['from_number'] + to_number = module.params['to_number'] + + try: + response = post_text(module, account_sid, auth_token, msg, + from_number, to_number) + except Exception, e: + module.fail_json(msg="unable to send text message to %s" % to_number) + + module.exit_json(msg=msg, changed=False) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git 
a/lib/ansible/modules/extras/notification/typetalk b/lib/ansible/modules/extras/notification/typetalk new file mode 100644 index 00000000000..b987acbe837 --- /dev/null +++ b/lib/ansible/modules/extras/notification/typetalk @@ -0,0 +1,116 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +DOCUMENTATION = ''' +--- +module: typetalk +version_added: "1.6" +short_description: Send a message to typetalk +description: + - Send a message to typetalk using typetalk API ( http://developers.typetalk.in/ ) +options: + client_id: + description: + - OAuth2 client ID + required: true + client_secret: + description: + - OAuth2 client secret + required: true + topic: + description: + - topic id to post message + required: true + msg: + description: + - message body + required: true +requirements: [ urllib, urllib2, json ] +author: Takashi Someda +''' + +EXAMPLES = ''' +- typetalk: client_id=12345 client_secret=12345 topic=1 msg="install completed" +''' + +try: + import urllib +except ImportError: + urllib = None + +try: + import urllib2 +except ImportError: + urllib2 = None + +try: + import json +except ImportError: + json = None + + +def do_request(url, params, headers={}): + data = urllib.urlencode(params) + headers = dict(headers, **{ + 'User-Agent': 'Ansible/typetalk module', + }) + return urllib2.urlopen(urllib2.Request(url, data, headers)) + + +def get_access_token(client_id, client_secret): + params = { + 'client_id': client_id, + 'client_secret': client_secret, + 'grant_type': 'client_credentials', + 'scope': 'topic.post' + } + res = do_request('https://typetalk.in/oauth2/access_token', params) + return json.load(res)['access_token'] + + +def send_message(client_id, client_secret, topic, msg): + """ + send message to typetalk + """ + try: + access_token = get_access_token(client_id, client_secret) + url = 'https://typetalk.in/api/v1/topics/%d' % topic + headers = { + 'Authorization': 'Bearer %s' % access_token, + } + do_request(url, {'message': msg}, headers) + return True, {'access_token': access_token} + except urllib2.HTTPError, e: + return False, e + + +def main(): + + module = AnsibleModule( + argument_spec=dict( + client_id=dict(required=True), + client_secret=dict(required=True), + topic=dict(required=True, type='int'), + msg=dict(required=True), + ), + supports_check_mode=False + ) + + if not (urllib and urllib2 and json): + module.fail_json(msg="urllib, urllib2 and json modules are required") + + client_id = module.params["client_id"] + client_secret = module.params["client_secret"] + topic = module.params["topic"] + msg = module.params["msg"] + + res, error = send_message(client_id, client_secret, topic, msg) + if not res: + module.fail_json(msg='fail to send message with response code %s' % error.code) + + module.exit_json(changed=True, topic=topic, msg=msg) + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/composer b/lib/ansible/modules/extras/packaging/composer new file mode 100644 index 00000000000..2930018bd9f --- /dev/null +++ b/lib/ansible/modules/extras/packaging/composer @@ -0,0 +1,164 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Dimitrios Tydeas Mengidis + +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. 
+# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: composer +author: Dimitrios Tydeas Mengidis +short_description: Dependency Manager for PHP +version_added: "1.6" +description: + - Composer is a tool for dependency management in PHP. It allows you to declare the dependent libraries your project needs and it will install them in your project for you +options: + command: + version_added: "1.8" + description: + - Composer command like "install", "update" and so on + required: false + default: install + working_dir: + description: + - Directory of your project ( see --working-dir ) + required: true + default: null + aliases: [ "working-dir" ] + prefer_source: + description: + - Forces installation from package sources when possible ( see --prefer-source ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "prefer-source" ] + prefer_dist: + description: + - Forces installation from package dist even for de versions ( see --prefer-dist ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "prefer-dist" ] + no_dev: + description: + - Disables installation of require-dev packages ( see --no-dev ) + required: false + default: "yes" + choices: [ "yes", "no" ] + aliases: [ "no-dev" ] + no_scripts: + description: + - Skips the execution of all scripts defined in composer.json ( see --no-scripts ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "no-scripts" ] + no_plugins: + description: + - Disables all plugins ( see --no-plugins ) + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [ "no-plugins" ] + optimize_autoloader: + description: + - Optimize autoloader during autoloader dump ( see --optimize-autoloader ). Convert PSR-0/4 autoloading to classmap to get a faster autoloader. This is recommended especially for production, but can take a bit of time to run so it is currently not done by default. 
+ required: false + default: "yes" + choices: [ "yes", "no" ] + aliases: [ "optimize-autoloader" ] +requirements: + - php + - composer installed in bin path (recommended /usr/local/bin) +notes: + - Default options that are always appended in each execution are --no-ansi, --no-progress, and --no-interaction +''' + +EXAMPLES = ''' +# Downloads and installs all the libs and dependencies outlined in the /path/to/project/composer.lock +- composer: command=install working_dir=/path/to/project +''' + +import os +import re + +def parse_out(string): + return re.sub("\s+", " ", string).strip() + +def has_changed(string): + return (re.match("Nothing to install or update", string) != None) + +def composer_install(module, command, options): + php_path = module.get_bin_path("php", True, ["/usr/local/bin"]) + composer_path = module.get_bin_path("composer", True, ["/usr/local/bin"]) + cmd = "%s %s %s %s" % (php_path, composer_path, command, " ".join(options)) + + return module.run_command(cmd) + +def main(): + module = AnsibleModule( + argument_spec = dict( + command = dict(default="install", type="str", required=False), + working_dir = dict(aliases=["working-dir"], required=True), + prefer_source = dict(default="no", type="bool", aliases=["prefer-source"]), + prefer_dist = dict(default="no", type="bool", aliases=["prefer-dist"]), + no_dev = dict(default="yes", type="bool", aliases=["no-dev"]), + no_scripts = dict(default="no", type="bool", aliases=["no-scripts"]), + no_plugins = dict(default="no", type="bool", aliases=["no-plugins"]), + optimize_autoloader = dict(default="yes", type="bool", aliases=["optimize-autoloader"]), + ), + supports_check_mode=True + ) + + module.params["working_dir"] = os.path.abspath(module.params["working_dir"]) + + options = set([]) + # Default options + options.add("--no-ansi") + options.add("--no-progress") + options.add("--no-interaction") + + if module.check_mode: + options.add("--dry-run") + + # Get composer command with fallback to default + command = module.params['command'] + del module.params['command']; + + # Prepare options + for i in module.params: + opt = "--%s" % i.replace("_","-") + p = module.params[i] + if isinstance(p, (bool)) and p: + options.add(opt) + elif isinstance(p, (str)): + options.add("%s=%s" % (opt, p)) + + rc, out, err = composer_install(module, command, options) + + if rc != 0: + output = parse_out(err) + module.fail_json(msg=output) + else: + output = parse_out(out) + module.exit_json(changed=has_changed(output), msg=output) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/cpanm b/lib/ansible/modules/extras/packaging/cpanm new file mode 100644 index 00000000000..5b1a9878d21 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/cpanm @@ -0,0 +1,145 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Franck Cuny +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . 
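+# Editor's note: with every option supplied, _build_cmd_line() further down
+# composes a plain cpanm invocation along the lines of this (hypothetical)
+# command:
+#
+#   cpanm Dancer -n -l /srv/webapps/my_app/extlib --mirror http://cpan.cpantesters.org/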
+# + +DOCUMENTATION = ''' +--- +module: cpanm +short_description: Manages Perl library dependencies. +description: + - Manage Perl library dependencies. +version_added: "1.6" +options: + name: + description: + - The name of the Perl library to install + required: false + default: null + aliases: ["pkg"] + from_path: + description: + - The local directory from where to install + required: false + default: null + notest: + description: + - Do not run unit tests + required: false + default: false + locallib: + description: + - Specify the install base to install modules + required: false + default: false + mirror: + description: + - Specifies the base URL for the CPAN mirror to use + required: false + default: false +examples: + - code: "cpanm: name=Dancer" + description: Install I(Dancer) perl package. + - code: "cpanm: name=Dancer locallib=/srv/webapps/my_app/extlib" + description: "Install I(Dancer) (U(http://perldancer.org/)) into the specified I(locallib)" + - code: "cpanm: from_path=/srv/webapps/my_app/src/" + description: Install perl dependencies from local directory. + - code: "cpanm: name=Dancer notest=True locallib=/srv/webapps/my_app/extlib" + description: Install I(Dancer) perl package without running the unit tests in indicated I(locallib). + - code: "cpanm: name=Dancer mirror=http://cpan.cpantesters.org/" + description: Install I(Dancer) perl package from a specific mirror +notes: + - Please note that U(http://search.cpan.org/dist/App-cpanminus/bin/cpanm, cpanm) must be installed on the remote host. +author: Franck Cuny +''' + +def _is_package_installed(module, name, locallib, cpanm): + cmd = "" + if locallib: + os.environ["PERL5LIB"] = "%s/lib/perl5" % locallib + cmd = "%s perl -M%s -e '1'" % (cmd, name) + res, stdout, stderr = module.run_command(cmd, check_rc=False) + if res == 0: + return True + else: + return False + +def _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm): + # this code should use "%s" like everything else and just return early but not fixing all of it now. 
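+    # (with "%s" formatting the first branch would read:  cmd = "%s %s" % (cpanm, from_path))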
+ # don't copy stuff like this + if from_path: + cmd = "{cpanm} {path}".format(cpanm=cpanm, path=from_path) + else: + cmd = "{cpanm} {name}".format(cpanm=cpanm, name=name) + + if notest is True: + cmd = "{cmd} -n".format(cmd=cmd) + + if locallib is not None: + cmd = "{cmd} -l {locallib}".format(cmd=cmd, locallib=locallib) + + if mirror is not None: + cmd = "{cmd} --mirror {mirror}".format(cmd=cmd, mirror=mirror) + + return cmd + + +def main(): + arg_spec = dict( + name=dict(default=None, required=False, aliases=['pkg']), + from_path=dict(default=None, required=False), + notest=dict(default=False, type='bool'), + locallib=dict(default=None, required=False), + mirror=dict(default=None, required=False) + ) + + module = AnsibleModule( + argument_spec=arg_spec, + required_one_of=[['name', 'from_path']], + ) + + cpanm = module.get_bin_path('cpanm', True) + name = module.params['name'] + from_path = module.params['from_path'] + notest = module.boolean(module.params.get('notest', False)) + locallib = module.params['locallib'] + mirror = module.params['mirror'] + + changed = False + + installed = _is_package_installed(module, name, locallib, cpanm) + + if not installed: + out_cpanm = err_cpanm = '' + cmd = _build_cmd_line(name, from_path, notest, locallib, mirror, cpanm) + + rc_cpanm, out_cpanm, err_cpanm = module.run_command(cmd, check_rc=False) + + if rc_cpanm != 0: + module.fail_json(msg=err_cpanm, cmd=cmd) + + if err_cpanm and 'is up to date' not in err_cpanm: + changed = True + + module.exit_json(changed=changed, binary=cpanm, name=name) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/homebrew b/lib/ansible/modules/extras/packaging/homebrew new file mode 100644 index 00000000000..2ecac0c4ace --- /dev/null +++ b/lib/ansible/modules/extras/packaging/homebrew @@ -0,0 +1,835 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Andrew Dunham +# (c) 2013, Daniel Jaouen +# +# Based on macports (Jimmy Tang ) +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
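+# Editor's note: a small, hypothetical illustration of the validation helper
+# defined below -- _create_regex_group() turns a whitespace/comment formatted
+# list of allowed characters into a *negated* character class, so a successful
+# search() means the value contains a character that is not allowed:
+#
+#   invalid = _create_regex_group(r'\w . -')   # allow word chars, dots, dashes
+#   invalid.search('foo-bar2.1')               # no match  -> value is valid
+#   invalid.search('foo+bar')                  # matches '+' -> value is invalid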
+ +DOCUMENTATION = ''' +--- +module: homebrew +author: Andrew Dunham and Daniel Jaouen +short_description: Package manager for Homebrew +description: + - Manages Homebrew packages +version_added: "1.1" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'head', 'latest', 'present', 'absent', 'linked', 'unlinked' ] + required: false + default: present + update_homebrew: + description: + - update homebrew itself first + required: false + default: "no" + choices: [ "yes", "no" ] + upgrade_all: + description: + - upgrade all homebrew packages + required: false + default: no + choices: [ "yes", "no" ] + install_options: + description: + - options flags to install a package + required: false + default: null + version_added: "1.4" +notes: [] +''' +EXAMPLES = ''' +- homebrew: name=foo state=present +- homebrew: name=foo state=present update_homebrew=yes +- homebrew: name=foo state=latest update_homebrew=yes +- homebrew: update_homebrew=yes upgrade_all=yes +- homebrew: name=foo state=head +- homebrew: name=foo state=linked +- homebrew: name=foo state=absent +- homebrew: name=foo,bar state=absent +- homebrew: name=foo state=present install_options=with-baz,enable-debug +''' + +import os.path +import re + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class Homebrew(object): + '''A class to manage Homebrew packages.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + . # dots + - # dashes + '''.format(sep=os.path.sep) + + VALID_PACKAGE_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + . 
# dots + \+ # plusses + - # dashes + ''' + + INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) + INVALID_PACKAGE_REGEX = _create_regex_group(VALID_PACKAGE_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, basestring): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - dots + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, basestring) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_package(cls, package): + '''A valid package is either None or alphanumeric.''' + + if package is None: + return True + + return ( + isinstance(package, basestring) + and not cls.INVALID_PACKAGE_REGEX.search(package) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - None + - installed + - upgraded + - head + - linked + - unlinked + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, basestring) + and state.lower() in ( + 'installed', + 'upgraded', + 'head', + 'linked', + 'unlinked', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewException(self.message) + + else: + if isinstance(path, basestring): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_package(self): + return self._current_package + + @current_package.setter + def current_package(self, package): + if not self.valid_package(package): + self._current_package = None + self.failed = True 
+ self.message = 'Invalid package: {0}.'.format(package) + raise HomebrewException(self.message) + + else: + self._current_package = package + return package + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=None, packages=None, state=None, + update_homebrew=False, upgrade_all=False, + install_options=None): + if not install_options: + install_options = list() + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, + install_options=install_options, ) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + def _prep(self): + self._prep_path() + self._prep_brew_path() + + def _prep_path(self): + if not self.path: + self.path = ['/usr/local/bin'] + + def _prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_package_is_installed(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + cmd = [ + "{brew_path}".format(brew_path=self.brew_path), + "info", + self.current_package, + ] + rc, out, err = self.module.run_command(cmd) + for line in out.split('\n'): + if ( + re.search(r'Built from source', line) + or re.search(r'Poured from bottle', line) + ): + return True + + return False + + def _outdated_packages(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'outdated', + ]) + return [line.split(' ')[0].strip() for line in out.split('\n') if line] + + def _current_package_is_outdated(self): + if not self.valid_package(self.current_package): + return False + + return self.current_package in self._outdated_packages() + + def _current_package_is_installed_from_head(self): + if not Homebrew.valid_package(self.current_package): + return False + elif not self._current_package_is_installed(): + return False + + rc, out, err = self.module.run_command([ + self.brew_path, + 'info', + self.current_package, + ]) + + try: + version_info = [line for line in out.split('\n') if line][0] + except IndexError: + return False + + return version_info.split(' ')[-1] == 'HEAD' + # /checks 
------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.update_homebrew: + self._update_homebrew() + + if self.upgrade_all: + self._upgrade_all() + + if self.packages: + if self.state == 'installed': + return self._install_packages() + elif self.state == 'upgraded': + return self._upgrade_packages() + elif self.state == 'head': + return self._install_packages() + elif self.state == 'linked': + return self._link_packages() + elif self.state == 'unlinked': + return self._unlink_packages() + elif self.state == 'absent': + return self._uninstall_packages() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ]) + if rc == 0: + if out and isinstance(out, basestring): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /updated ------------------------------- }}} + + # _upgrade_all --------------------------- {{{ + def _upgrade_all(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'upgrade', + ]) + if rc == 0: + if not out: + self.message = 'Homebrew packages already upgraded.' + + else: + self.changed = True + self.message = 'Homebrew upgraded.' + + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + # /_upgrade_all -------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already installed: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be installed: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + if self.state == 'head': + head = '--HEAD' + else: + head = None + + opts = ( + [self.brew_path, 'install'] + + self.install_options + + [self.current_package, head] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package installed: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _install_packages(self): + for package in self.packages: + self.current_package = package + self._install_current_package() + + return True + # /installed ----------------------------- }}} + + # upgraded ------------------------------- {{{ + def _upgrade_current_package(self): + command = 'upgrade' + + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + command = 'install' + + if self._current_package_is_installed() 
and not self._current_package_is_outdated(): + self.message = 'Package is already upgraded: {0}'.format( + self.current_package, + ) + self.unchanged_count += 1 + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be upgraded: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, command] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if self._current_package_is_installed() and not self._current_package_is_outdated(): + self.changed_count += 1 + self.changed = True + self.message = 'Package upgraded: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_all_packages(self): + opts = ( + [self.brew_path, 'upgrade'] + + self.install_options + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed = True + self.message = 'All packages upgraded.' + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _upgrade_packages(self): + if not self.packages: + self._upgrade_all_packages() + else: + for package in self.packages: + self.current_package = package + self._upgrade_current_package() + return True + # /upgraded ------------------------------ }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.unchanged_count += 1 + self.message = 'Package already uninstalled: {0}'.format( + self.current_package, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be uninstalled: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'uninstall'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if not self._current_package_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Package uninstalled: {0}'.format(self.current_package) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewException(self.message) + + def _uninstall_packages(self): + for package in self.packages: + self.current_package = package + self._uninstall_current_package() + + return True + # /uninstalled ----------------------------- }}} + + # linked --------------------------------- {{{ + def _link_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be linked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'link'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if 
opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package linked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be linked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _link_packages(self): + for package in self.packages: + self.current_package = package + self._link_current_package() + + return True + # /linked -------------------------------- }}} + + # unlinked ------------------------------- {{{ + def _unlink_current_package(self): + if not self.valid_package(self.current_package): + self.failed = True + self.message = 'Invalid package: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if not self._current_package_is_installed(): + self.failed = True + self.message = 'Package not installed: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + if self.module.check_mode: + self.changed = True + self.message = 'Package would be unlinked: {0}'.format( + self.current_package + ) + raise HomebrewException(self.message) + + opts = ( + [self.brew_path, 'unlink'] + + self.install_options + + [self.current_package] + ) + cmd = [opt for opt in opts if opt] + rc, out, err = self.module.run_command(cmd) + + if rc == 0: + self.changed_count += 1 + self.changed = True + self.message = 'Package unlinked: {0}'.format(self.current_package) + + return True + else: + self.failed = True + self.message = 'Package could not be unlinked: {0}.'.format(self.current_package) + raise HomebrewException(self.message) + + def _unlink_packages(self): + for package in self.packages: + self.current_package = package + self._unlink_current_package() + + return True + # /unlinked ------------------------------ }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=["pkg"], required=False), + path=dict(required=False), + state=dict( + default="present", + choices=[ + "present", "installed", + "latest", "upgraded", "head", + "linked", "unlinked", + "absent", "removed", "uninstalled", + ], + ), + update_homebrew=dict( + default="no", + aliases=["update-brew"], + type='bool', + ), + upgrade_all=dict( + default="no", + aliases=["upgrade"], + type='bool', + ), + install_options=dict( + default=None, + aliases=['options'], + type='list', + ) + ), + supports_check_mode=True, + ) + p = module.params + + if p['name']: + packages = p['name'].split(',') + else: + packages = None + + path = p['path'] + if path: + path = path.split(':') + else: + path = ['/usr/local/bin'] + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('head', ): + state = 'head' + if state in ('latest', 'upgraded'): + state = 'upgraded' + if state == 'linked': + state = 'linked' + if state == 'unlinked': + state = 'unlinked' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + update_homebrew = p['update_homebrew'] + upgrade_all = p['upgrade_all'] + p['install_options'] = p['install_options'] or [] + install_options = ['--{0}'.format(install_option) + for install_option in p['install_options']] + + brew = Homebrew(module=module, path=path, packages=packages, + state=state, update_homebrew=update_homebrew, + upgrade_all=upgrade_all, install_options=install_options) + (failed, changed, message) = brew.run() + if failed: + module.fail_json(msg=message) + 
else: + module.exit_json(changed=changed, msg=message) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/lib/ansible/modules/extras/packaging/homebrew_cask b/lib/ansible/modules/extras/packaging/homebrew_cask new file mode 100644 index 00000000000..dede8d4bb36 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/homebrew_cask @@ -0,0 +1,513 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Daniel Jaouen +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: homebrew_cask +author: Daniel Jaouen +short_description: Install/uninstall homebrew casks. +description: + - Manages Homebrew casks. +version_added: "1.6" +options: + name: + description: + - name of cask to install/remove + required: true + state: + description: + - state of the cask + choices: [ 'installed', 'uninstalled' ] + required: false + default: present +''' +EXAMPLES = ''' +- homebrew_cask: name=alfred state=present +- homebrew_cask: name=alfred state=absent +''' + +import os.path +import re + + +# exceptions -------------------------------------------------------------- {{{ +class HomebrewCaskException(Exception): + pass +# /exceptions ------------------------------------------------------------- }}} + + +# utils ------------------------------------------------------------------- {{{ +def _create_regex_group(s): + lines = (line.strip() for line in s.split('\n') if line.strip()) + chars = filter(None, (line.split('#')[0].strip() for line in lines)) + group = r'[^' + r''.join(chars) + r']' + return re.compile(group) +# /utils ------------------------------------------------------------------ }}} + + +class HomebrewCask(object): + '''A class to manage Homebrew casks.''' + + # class regexes ------------------------------------------------ {{{ + VALID_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + : # colons + {sep} # the OS-specific path separator + - # dashes + '''.format(sep=os.path.sep) + + VALID_BREW_PATH_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + \s # spaces + {sep} # the OS-specific path separator + - # dashes + '''.format(sep=os.path.sep) + + VALID_CASK_CHARS = r''' + \w # alphanumeric characters (i.e., [a-zA-Z0-9_]) + - # dashes + ''' + + INVALID_PATH_REGEX = _create_regex_group(VALID_PATH_CHARS) + INVALID_BREW_PATH_REGEX = _create_regex_group(VALID_BREW_PATH_CHARS) + INVALID_CASK_REGEX = _create_regex_group(VALID_CASK_CHARS) + # /class regexes ----------------------------------------------- }}} + + # class validations -------------------------------------------- {{{ + @classmethod + def valid_path(cls, path): + ''' + `path` must be one of: + - list of paths + - a string containing only: + - alphanumeric characters + - dashes + - spaces + - colons + - os.path.sep + ''' + + if isinstance(path, basestring): + return not cls.INVALID_PATH_REGEX.search(path) + + try: + iter(path) + except TypeError: + return False + else: + paths = 
path + return all(cls.valid_brew_path(path_) for path_ in paths) + + @classmethod + def valid_brew_path(cls, brew_path): + ''' + `brew_path` must be one of: + - None + - a string containing only: + - alphanumeric characters + - dashes + - spaces + - os.path.sep + ''' + + if brew_path is None: + return True + + return ( + isinstance(brew_path, basestring) + and not cls.INVALID_BREW_PATH_REGEX.search(brew_path) + ) + + @classmethod + def valid_cask(cls, cask): + '''A valid cask is either None or alphanumeric + backslashes.''' + + if cask is None: + return True + + return ( + isinstance(cask, basestring) + and not cls.INVALID_CASK_REGEX.search(cask) + ) + + @classmethod + def valid_state(cls, state): + ''' + A valid state is one of: + - installed + - absent + ''' + + if state is None: + return True + else: + return ( + isinstance(state, basestring) + and state.lower() in ( + 'installed', + 'absent', + ) + ) + + @classmethod + def valid_module(cls, module): + '''A valid module is an instance of AnsibleModule.''' + + return isinstance(module, AnsibleModule) + # /class validations ------------------------------------------- }}} + + # class properties --------------------------------------------- {{{ + @property + def module(self): + return self._module + + @module.setter + def module(self, module): + if not self.valid_module(module): + self._module = None + self.failed = True + self.message = 'Invalid module: {0}.'.format(module) + raise HomebrewCaskException(self.message) + + else: + self._module = module + return module + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + if not self.valid_path(path): + self._path = [] + self.failed = True + self.message = 'Invalid path: {0}.'.format(path) + raise HomebrewCaskException(self.message) + + else: + if isinstance(path, basestring): + self._path = path.split(':') + else: + self._path = path + + return path + + @property + def brew_path(self): + return self._brew_path + + @brew_path.setter + def brew_path(self, brew_path): + if not self.valid_brew_path(brew_path): + self._brew_path = None + self.failed = True + self.message = 'Invalid brew_path: {0}.'.format(brew_path) + raise HomebrewCaskException(self.message) + + else: + self._brew_path = brew_path + return brew_path + + @property + def params(self): + return self._params + + @params.setter + def params(self, params): + self._params = self.module.params + return self._params + + @property + def current_cask(self): + return self._current_cask + + @current_cask.setter + def current_cask(self, cask): + if not self.valid_cask(cask): + self._current_cask = None + self.failed = True + self.message = 'Invalid cask: {0}.'.format(cask) + raise HomebrewCaskException(self.message) + + else: + self._current_cask = cask + return cask + # /class properties -------------------------------------------- }}} + + def __init__(self, module, path=None, casks=None, state=None): + self._setup_status_vars() + self._setup_instance_vars(module=module, path=path, casks=casks, + state=state) + + self._prep() + + # prep --------------------------------------------------------- {{{ + def _setup_status_vars(self): + self.failed = False + self.changed = False + self.changed_count = 0 + self.unchanged_count = 0 + self.message = '' + + def _setup_instance_vars(self, **kwargs): + for key, val in kwargs.iteritems(): + setattr(self, key, val) + + def _prep(self): + self._prep_path() + self._prep_brew_path() + + def _prep_path(self): + if not self.path: + self.path = ['/usr/local/bin'] + + def 
_prep_brew_path(self): + if not self.module: + self.brew_path = None + self.failed = True + self.message = 'AnsibleModule not set.' + raise HomebrewCaskException(self.message) + + self.brew_path = self.module.get_bin_path( + 'brew', + required=True, + opt_dirs=self.path, + ) + if not self.brew_path: + self.brew_path = None + self.failed = True + self.message = 'Unable to locate homebrew executable.' + raise HomebrewCaskException('Unable to locate homebrew executable.') + + return self.brew_path + + def _status(self): + return (self.failed, self.changed, self.message) + # /prep -------------------------------------------------------- }}} + + def run(self): + try: + self._run() + except HomebrewCaskException: + pass + + if not self.failed and (self.changed_count + self.unchanged_count > 1): + self.message = "Changed: %d, Unchanged: %d" % ( + self.changed_count, + self.unchanged_count, + ) + (failed, changed, message) = self._status() + + return (failed, changed, message) + + # checks ------------------------------------------------------- {{{ + def _current_cask_is_installed(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + cmd = [self.brew_path, 'cask', 'list'] + rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + + if 'nothing to list' in err: + return False + elif rc == 0: + casks = [cask_.strip() for cask_ in out.split('\n') if cask_.strip()] + return self.current_cask in casks + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /checks ------------------------------------------------------ }}} + + # commands ----------------------------------------------------- {{{ + def _run(self): + if self.state == 'installed': + return self._install_casks() + elif self.state == 'absent': + return self._uninstall_casks() + + if self.command: + return self._command() + + # updated -------------------------------- {{{ + def _update_homebrew(self): + rc, out, err = self.module.run_command([ + self.brew_path, + 'update', + ], path_prefix=self.path[0]) + if rc == 0: + if out and isinstance(out, basestring): + already_updated = any( + re.search(r'Already up-to-date.', s.strip(), re.IGNORECASE) + for s in out.split('\n') + if s + ) + if not already_updated: + self.changed = True + self.message = 'Homebrew updated successfully.' + else: + self.message = 'Homebrew already up-to-date.' 
+ + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + # /updated ------------------------------- }}} + + # installed ------------------------------ {{{ + def _install_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already installed: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be installed: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'cask', 'install', self.current_cask) + if opt] + + rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + + if self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask installed: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _install_casks(self): + for cask in self.casks: + self.current_cask = cask + self._install_current_cask() + + return True + # /installed ----------------------------- }}} + + # uninstalled ---------------------------- {{{ + def _uninstall_current_cask(self): + if not self.valid_cask(self.current_cask): + self.failed = True + self.message = 'Invalid cask: {0}.'.format(self.current_cask) + raise HomebrewCaskException(self.message) + + if not self._current_cask_is_installed(): + self.unchanged_count += 1 + self.message = 'Cask already uninstalled: {0}'.format( + self.current_cask, + ) + return True + + if self.module.check_mode: + self.changed = True + self.message = 'Cask would be uninstalled: {0}'.format( + self.current_cask + ) + raise HomebrewCaskException(self.message) + + cmd = [opt + for opt in (self.brew_path, 'cask', 'uninstall', self.current_cask) + if opt] + + rc, out, err = self.module.run_command(cmd, path_prefix=self.path[0]) + + if not self._current_cask_is_installed(): + self.changed_count += 1 + self.changed = True + self.message = 'Cask uninstalled: {0}'.format(self.current_cask) + return True + else: + self.failed = True + self.message = err.strip() + raise HomebrewCaskException(self.message) + + def _uninstall_casks(self): + for cask in self.casks: + self.current_cask = cask + self._uninstall_current_cask() + + return True + # /uninstalled ----------------------------- }}} + # /commands ---------------------------------------------------- }}} + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=["cask"], required=False), + path=dict(required=False), + state=dict( + default="present", + choices=[ + "present", "installed", + "absent", "removed", "uninstalled", + ], + ), + ), + supports_check_mode=True, + ) + p = module.params + + if p['name']: + casks = p['name'].split(',') + else: + casks = None + + path = p['path'] + if path: + path = path.split(':') + else: + path = ['/usr/local/bin'] + + state = p['state'] + if state in ('present', 'installed'): + state = 'installed' + if state in ('absent', 'removed', 'uninstalled'): + state = 'absent' + + brew_cask = HomebrewCask(module=module, path=path, casks=casks, + state=state) + (failed, changed, message) = brew_cask.run() + if failed: + module.fail_json(msg=message) + else: + 
module.exit_json(changed=changed, msg=message) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/lib/ansible/modules/extras/packaging/homebrew_tap b/lib/ansible/modules/extras/packaging/homebrew_tap new file mode 100644 index 00000000000..a79ba076a8a --- /dev/null +++ b/lib/ansible/modules/extras/packaging/homebrew_tap @@ -0,0 +1,215 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Daniel Jaouen +# Based on homebrew (Andrew Dunham ) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import re + +DOCUMENTATION = ''' +--- +module: homebrew_tap +author: Daniel Jaouen +short_description: Tap a Homebrew repository. +description: + - Tap external Homebrew repositories. +version_added: "1.6" +options: + tap: + description: + - The repository to tap. + required: true + state: + description: + - state of the repository. + choices: [ 'present', 'absent' ] + required: false + default: 'present' +requirements: [ homebrew ] +''' + +EXAMPLES = ''' +homebrew_tap: tap=homebrew/dupes state=present +homebrew_tap: tap=homebrew/dupes state=absent +homebrew_tap: tap=homebrew/dupes,homebrew/science state=present +''' + + +def a_valid_tap(tap): + '''Returns True if the tap is valid.''' + regex = re.compile(r'^(\S+)/(homebrew-)?(\w+)$') + return regex.match(tap) + + +def already_tapped(module, brew_path, tap): + '''Returns True if already tapped.''' + + rc, out, err = module.run_command([ + brew_path, + 'tap', + ]) + taps = [tap_.strip().lower() for tap_ in out.split('\n') if tap_] + return tap.lower() in taps + + +def add_tap(module, brew_path, tap): + '''Adds a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif not already_tapped(module, brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'tap', + tap, + ]) + if already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully tapped: %s' % tap + else: + failed = True + msg = 'failed to tap: %s' % tap + + else: + msg = 'already tapped: %s' % tap + + return (failed, changed, msg) + + +def add_taps(module, brew_path, taps): + '''Adds one or more taps.''' + failed, unchanged, added, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = add_tap(module, brew_path, tap) + if failed: + break + if changed: + added += 1 + else: + unchanged += 1 + + if failed: + msg = 'added: %d, unchanged: %d, error: ' + msg + msg = msg % (added, unchanged) + elif added: + changed = True + msg = 'added: %d, unchanged: %d' % (added, unchanged) + else: + msg = 'added: %d, unchanged: %d' % (added, unchanged) + + return (failed, changed, msg) + + +def remove_tap(module, brew_path, tap): + '''Removes a single tap.''' + failed, changed, msg = False, False, '' + + if not a_valid_tap(tap): + failed = True + msg = 'not a valid tap: %s' % tap + + elif already_tapped(module, 
brew_path, tap): + if module.check_mode: + module.exit_json(changed=True) + + rc, out, err = module.run_command([ + brew_path, + 'untap', + tap, + ]) + if not already_tapped(module, brew_path, tap): + changed = True + msg = 'successfully untapped: %s' % tap + else: + failed = True + msg = 'failed to untap: %s' % tap + + else: + msg = 'already untapped: %s' % tap + + return (failed, changed, msg) + + +def remove_taps(module, brew_path, taps): + '''Removes one or more taps.''' + failed, unchanged, removed, msg = False, 0, 0, '' + + for tap in taps: + (failed, changed, msg) = remove_tap(module, brew_path, tap) + if failed: + break + if changed: + removed += 1 + else: + unchanged += 1 + + if failed: + msg = 'removed: %d, unchanged: %d, error: ' + msg + msg = msg % (removed, unchanged) + elif removed: + changed = True + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + else: + msg = 'removed: %d, unchanged: %d' % (removed, unchanged) + + return (failed, changed, msg) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(aliases=['tap'], required=True), + state=dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True, + ) + + brew_path = module.get_bin_path( + 'brew', + required=True, + opt_dirs=['/usr/local/bin'] + ) + + taps = module.params['name'].split(',') + + if module.params['state'] == 'present': + failed, changed, msg = add_taps(module, brew_path, taps) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + + elif module.params['state'] == 'absent': + failed, changed, msg = remove_taps(module, brew_path, taps) + + if failed: + module.fail_json(msg=msg) + else: + module.exit_json(changed=changed, msg=msg) + +# this is magic, see lib/ansible/module_common.py +#<> +main() diff --git a/lib/ansible/modules/extras/packaging/layman b/lib/ansible/modules/extras/packaging/layman new file mode 100644 index 00000000000..57c03528c9e --- /dev/null +++ b/lib/ansible/modules/extras/packaging/layman @@ -0,0 +1,236 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Jakub Jirutka +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import shutil +from os import path +from urllib2 import Request, urlopen, URLError + +DOCUMENTATION = ''' +--- +module: layman +author: Jakub Jirutka +version_added: "1.6" +short_description: Manage Gentoo overlays +description: + - Uses Layman to manage an additional repositories for the Portage package manager on Gentoo Linux. + Please note that Layman must be installed on a managed node prior using this module. +options: + name: + description: + - The overlay id to install, synchronize, or uninstall. + Use 'ALL' to sync all of the installed overlays (can be used only when C(state=updated)). + required: true + list_url: + description: + - An URL of the alternative overlays list that defines the overlay to install. 
+ This list will be fetched and saved under C(${overlay_defs})/${name}.xml), where + C(overlay_defs) is readed from the Layman's configuration. + required: false + state: + description: + - Whether to install (C(present)), sync (C(updated)), or uninstall (C(absent)) the overlay. + required: false + default: present + choices: [present, absent, updated] +''' + +EXAMPLES = ''' +# Install the overlay 'mozilla' which is on the central overlays list. +- layman: name=mozilla + +# Install the overlay 'cvut' from the specified alternative list. +- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml + +# Update (sync) the overlay 'cvut', or install if not installed yet. +- layman: name=cvut list_url=http://raw.github.com/cvut/gentoo-overlay/master/overlay.xml state=updated + +# Update (sync) all of the installed overlays. +- layman: name=ALL state=updated + +# Uninstall the overlay 'cvut'. +- layman: name=cvut state=absent +''' + +USERAGENT = 'ansible-httpget' + +try: + from layman.api import LaymanAPI + from layman.config import BareConfig + HAS_LAYMAN_API = True +except ImportError: + HAS_LAYMAN_API = False + + +class ModuleError(Exception): pass + + +def init_layman(config=None): + '''Returns the initialized ``LaymanAPI``. + + :param config: the layman's configuration to use (optional) + ''' + if config is None: config = BareConfig(read_configfile=True, quietness=1) + return LaymanAPI(config) + + +def download_url(url, dest): + ''' + :param url: the URL to download + :param dest: the absolute path of where to save the downloaded content to; + it must be writable and not a directory + + :raises ModuleError + ''' + request = Request(url) + request.add_header('User-agent', USERAGENT) + + try: + response = urlopen(request) + except URLError, e: + raise ModuleError("Failed to get %s: %s" % (url, str(e))) + + try: + with open(dest, 'w') as f: + shutil.copyfileobj(response, f) + except IOError, e: + raise ModuleError("Failed to write: %s" % str(e)) + + +def install_overlay(name, list_url=None): + '''Installs the overlay repository. If not on the central overlays list, + then :list_url of an alternative list must be provided. The list will be + fetched and saved under ``%(overlay_defs)/%(name.xml)`` (location of the + ``overlay_defs`` is read from the Layman's configuration). + + :param name: the overlay id + :param list_url: the URL of the remote repositories list to look for the overlay + definition (optional, default: None) + + :returns: True if the overlay was installed, or False if already exists + (i.e. nothing has changed) + :raises ModuleError + ''' + # read Layman configuration + layman_conf = BareConfig(read_configfile=True) + layman = init_layman(layman_conf) + + if layman.is_installed(name): + return False + + if not layman.is_repo(name): + if not list_url: raise ModuleError("Overlay '%s' is not on the list of known " \ + "overlays and URL of the remote list was not provided." % name) + + overlay_defs = layman_conf.get_option('overlay_defs') + dest = path.join(overlay_defs, name + '.xml') + + download_url(list_url, dest) + + # reload config + layman = init_layman() + + if not layman.add_repos(name): raise ModuleError(layman.get_errors()) + + return True + + +def uninstall_overlay(name): + '''Uninstalls the given overlay repository from the system. + + :param name: the overlay id to uninstall + + :returns: True if the overlay was uninstalled, or False if doesn't exist + (i.e. 
nothing has changed) + :raises ModuleError + ''' + layman = init_layman() + + if not layman.is_installed(name): + return False + + layman.delete_repos(name) + if layman.get_errors(): raise ModuleError(layman.get_errors()) + + return True + + +def sync_overlay(name): + '''Synchronizes the specified overlay repository. + + :param name: the overlay repository id to sync + :raises ModuleError + ''' + layman = init_layman() + + if not layman.sync(name): + messages = [ str(item[1]) for item in layman.sync_results[2] ] + raise ModuleError(messages) + + +def sync_overlays(): + '''Synchronize all of the installed overlays. + + :raises ModuleError + ''' + layman = init_layman() + + for name in layman.get_installed(): + sync_overlay(name) + + +def main(): + # define module + module = AnsibleModule( + argument_spec = { + 'name': { 'required': True }, + 'list_url': { 'aliases': ['url'] }, + 'state': { 'default': "present", 'choices': ['present', 'absent', 'updated'] }, + } + ) + + if not HAS_LAYMAN_API: + module.fail_json(msg='Layman is not installed') + + state, name, url = (module.params[key] for key in ['state', 'name', 'list_url']) + + changed = False + try: + if state == 'present': + changed = install_overlay(name, url) + + elif state == 'updated': + if name == 'ALL': + sync_overlays() + elif install_overlay(name, url): + changed = True + else: + sync_overlay(name) + else: + changed = uninstall_overlay(name) + + except ModuleError, e: + module.fail_json(msg=e.message) + else: + module.exit_json(changed=changed, name=name) + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/macports b/lib/ansible/modules/extras/packaging/macports new file mode 100644 index 00000000000..ae7010b1cbd --- /dev/null +++ b/lib/ansible/modules/extras/packaging/macports @@ -0,0 +1,217 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jimmy Tang +# Based on okpg (Patrick Pelletier ), pacman +# (Afterburn) and pkgin (Shaun Zinck) modules +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: macports +author: Jimmy Tang +short_description: Package manager for MacPorts +description: + - Manages MacPorts packages +version_added: "1.1" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent', 'active', 'inactive' ] + required: false + default: present + update_cache: + description: + - update the package db first + required: false + default: "no" + choices: [ "yes", "no" ] +notes: [] +''' +EXAMPLES = ''' +- macports: name=foo state=present +- macports: name=foo state=present update_cache=yes +- macports: name=foo state=absent +- macports: name=foo state=active +- macports: name=foo state=inactive +''' + +import pipes + +def update_package_db(module, port_path): + """ Updates packages list. 
""" + + rc, out, err = module.run_command("%s sync" % port_path) + + if rc != 0: + module.fail_json(msg="could not update package db") + + +def query_package(module, port_path, name, state="present"): + """ Returns whether a package is installed or not. """ + + if state == "present": + + rc, out, err = module.run_command("%s installed | grep -q ^.*%s" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) + if rc == 0: + return True + + return False + + elif state == "active": + + rc, out, err = module.run_command("%s installed %s | grep -q active" % (pipes.quote(port_path), pipes.quote(name)), use_unsafe_shell=True) + + if rc == 0: + return True + + return False + + +def remove_packages(module, port_path, packages): + """ Uninstalls one or more packages if installed. """ + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, port_path, package): + continue + + rc, out, err = module.run_command("%s uninstall %s" % (port_path, package)) + + if query_package(module, port_path, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, port_path, packages): + """ Installs one or more packages if not already installed. """ + + install_c = 0 + + for package in packages: + if query_package(module, port_path, package): + continue + + rc, out, err = module.run_command("%s install %s" % (port_path, package)) + + if not query_package(module, port_path, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def activate_packages(module, port_path, packages): + """ Activate a package if it's inactive. """ + + activate_c = 0 + + for package in packages: + if not query_package(module, port_path, package): + module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) + + if query_package(module, port_path, package, state="active"): + continue + + rc, out, err = module.run_command("%s activate %s" % (port_path, package)) + + if not query_package(module, port_path, package, state="active"): + module.fail_json(msg="failed to activate %s: %s" % (package, out)) + + activate_c += 1 + + if activate_c > 0: + module.exit_json(changed=True, msg="activated %s package(s)" % (activate_c)) + + module.exit_json(changed=False, msg="package(s) already active") + + +def deactivate_packages(module, port_path, packages): + """ Deactivate a package if it's active. 
""" + + deactivated_c = 0 + + for package in packages: + if not query_package(module, port_path, package): + module.fail_json(msg="failed to activate %s, package(s) not present" % (package)) + + if not query_package(module, port_path, package, state="active"): + continue + + rc, out, err = module.run_command("%s deactivate %s" % (port_path, package)) + + if query_package(module, port_path, package, state="active"): + module.fail_json(msg="failed to deactivated %s: %s" % (package, out)) + + deactivated_c += 1 + + if deactivated_c > 0: + module.exit_json(changed=True, msg="deactivated %s package(s)" % (deactivated_c)) + + module.exit_json(changed=False, msg="package(s) already inactive") + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=["pkg"], required=True), + state = dict(default="present", choices=["present", "installed", "absent", "removed", "active", "inactive"]), + update_cache = dict(default="no", aliases=["update-cache"], type='bool') + ) + ) + + port_path = module.get_bin_path('port', True, ['/opt/local/bin']) + + p = module.params + + if p["update_cache"]: + update_package_db(module, port_path) + + pkgs = p["name"].split(",") + + if p["state"] in ["present", "installed"]: + install_packages(module, port_path, pkgs) + + elif p["state"] in ["absent", "removed"]: + remove_packages(module, port_path, pkgs) + + elif p["state"] == "active": + activate_packages(module, port_path, pkgs) + + elif p["state"] == "inactive": + deactivate_packages(module, port_path, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/npm b/lib/ansible/modules/extras/packaging/npm new file mode 100644 index 00000000000..1dd2e998492 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/npm @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Chris Hoffman +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: npm +short_description: Manage node.js packages with npm +description: + - Manage node.js packages with Node Package Manager (npm) +version_added: 1.2 +author: Chris Hoffman +options: + name: + description: + - The name of a node.js library to install + required: false + path: + description: + - The base path where to install the node.js libraries + required: false + version: + description: + - The version to be installed + required: false + global: + description: + - Install the node.js library globally + required: false + default: no + choices: [ "yes", "no" ] + executable: + description: + - The executable location for npm. + - This is useful if you are using a version manager, such as nvm + required: false + ignore_scripts: + description: + - Use the --ignore-scripts flag when installing. 
+ required: false + choices: [ "yes", "no" ] + default: no + version_added: "1.8" + production: + description: + - Install dependencies in production mode, excluding devDependencies + required: false + choices: [ "yes", "no" ] + default: no + registry: + description: + - The registry to install modules from. + required: false + version_added: "1.6" + state: + description: + - The state of the node.js library + required: false + default: present + choices: [ "present", "absent", "latest" ] +''' + +EXAMPLES = ''' +description: Install "coffee-script" node.js package. +- npm: name=coffee-script path=/app/location + +description: Install "coffee-script" node.js package on version 1.6.1. +- npm: name=coffee-script version=1.6.1 path=/app/location + +description: Install "coffee-script" node.js package globally. +- npm: name=coffee-script global=yes + +description: Remove the globally package "coffee-script". +- npm: name=coffee-script global=yes state=absent + +description: Install "coffee-script" node.js package from custom registry. +- npm: name=coffee-script registry=http://registry.mysite.com + +description: Install packages based on package.json. +- npm: path=/app/location + +description: Update packages based on package.json to their latest version. +- npm: path=/app/location state=latest + +description: Install packages based on package.json using the npm installed with nvm v0.10.1. +- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present +''' + +import os + +try: + import json +except ImportError: + import simplejson as json + +class Npm(object): + def __init__(self, module, **kwargs): + self.module = module + self.glbl = kwargs['glbl'] + self.name = kwargs['name'] + self.version = kwargs['version'] + self.path = kwargs['path'] + self.registry = kwargs['registry'] + self.production = kwargs['production'] + self.ignore_scripts = kwargs['ignore_scripts'] + + if kwargs['executable']: + self.executable = kwargs['executable'].split(' ') + else: + self.executable = [module.get_bin_path('npm', True)] + + if kwargs['version']: + self.name_version = self.name + '@' + self.version + else: + self.name_version = self.name + + def _exec(self, args, run_in_check_mode=False, check_rc=True): + if not self.module.check_mode or (self.module.check_mode and run_in_check_mode): + cmd = self.executable + args + + if self.glbl: + cmd.append('--global') + if self.production: + cmd.append('--production') + if self.ignore_scripts: + cmd.append('--ignore-scripts') + if self.name: + cmd.append(self.name_version) + if self.registry: + cmd.append('--registry') + cmd.append(self.registry) + + #If path is specified, cd into that path and run the command. 
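+            # Editor's note (illustrative, not part of the original module): with the
+            # values from the examples above -- name=coffee-script, version=1.6.1,
+            # registry=http://registry.mysite.com, state=present -- the argument list
+            # built above works out to roughly:
+            #
+            #   ['npm', 'install', 'coffee-script@1.6.1',
+            #    '--registry', 'http://registry.mysite.com']
+            #
+            # run_command() receives it as a list (no shell expansion), with cwd
+            # pointing at `path` when one is given, as set up just below.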
+ cwd = None + if self.path: + if not os.path.exists(self.path): + os.makedirs(self.path) + if not os.path.isdir(self.path): + self.module.fail_json(msg="path %s is not a directory" % self.path) + cwd = self.path + + rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd) + return out + return '' + + def list(self): + cmd = ['list', '--json'] + + installed = list() + missing = list() + data = json.loads(self._exec(cmd, True, False)) + if 'dependencies' in data: + for dep in data['dependencies']: + if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']: + missing.append(dep) + elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']: + missing.append(dep) + else: + installed.append(dep) + if self.name and self.name not in installed: + missing.append(self.name) + #Named dependency not installed + else: + missing.append(self.name) + + return installed, missing + + def install(self): + return self._exec(['install']) + + def update(self): + return self._exec(['update']) + + def uninstall(self): + return self._exec(['uninstall']) + + def list_outdated(self): + outdated = list() + data = self._exec(['outdated'], True, False) + for dep in data.splitlines(): + if dep: + # node.js v0.10.22 changed the `npm outdated` module separator + # from "@" to " ". Split on both for backwards compatibility. + pkg, other = re.split('\s|@', dep, 1) + outdated.append(pkg) + + return outdated + + +def main(): + arg_spec = dict( + name=dict(default=None), + path=dict(default=None), + version=dict(default=None), + production=dict(default='no', type='bool'), + executable=dict(default=None), + registry=dict(default=None), + state=dict(default='present', choices=['present', 'absent', 'latest']), + ignore_scripts=dict(default=False, type='bool'), + ) + arg_spec['global'] = dict(default='no', type='bool') + module = AnsibleModule( + argument_spec=arg_spec, + supports_check_mode=True + ) + + name = module.params['name'] + path = module.params['path'] + version = module.params['version'] + glbl = module.params['global'] + production = module.params['production'] + executable = module.params['executable'] + registry = module.params['registry'] + state = module.params['state'] + ignore_scripts = module.params['ignore_scripts'] + + if not path and not glbl: + module.fail_json(msg='path must be specified when not using global') + if state == 'absent' and not name: + module.fail_json(msg='uninstalling a package is only available for named packages') + + npm = Npm(module, name=name, path=path, version=version, glbl=glbl, production=production, \ + executable=executable, registry=registry, ignore_scripts=ignore_scripts) + + changed = False + if state == 'present': + installed, missing = npm.list() + if len(missing): + changed = True + npm.install() + elif state == 'latest': + installed, missing = npm.list() + outdated = npm.list_outdated() + if len(missing) or len(outdated): + changed = True + npm.install() + else: #absent + installed, missing = npm.list() + if name in installed: + changed = True + npm.uninstall() + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/openbsd_pkg b/lib/ansible/modules/extras/packaging/openbsd_pkg new file mode 100644 index 00000000000..790fa89fac9 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/openbsd_pkg @@ -0,0 +1,373 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrik Lundin +# +# This 
file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import re +import shlex +import syslog + +DOCUMENTATION = ''' +--- +module: openbsd_pkg +author: Patrik Lundin +version_added: "1.1" +short_description: Manage packages on OpenBSD. +description: + - Manage packages on OpenBSD using the pkg tools. +options: + name: + required: true + description: + - Name of the package. + state: + required: true + choices: [ present, latest, absent ] + description: + - C(present) will make sure the package is installed. + C(latest) will make sure the latest version of the package is installed. + C(absent) will make sure the specified package is not installed. +''' + +EXAMPLES = ''' +# Make sure nmap is installed +- openbsd_pkg: name=nmap state=present + +# Make sure nmap is the latest version +- openbsd_pkg: name=nmap state=latest + +# Make sure nmap is not installed +- openbsd_pkg: name=nmap state=absent + +# Specify a pkg flavour with '--' +- openbsd_pkg: name=vim--nox11 state=present + +# Specify the default flavour to avoid ambiguity errors +- openbsd_pkg: name=vim-- state=present +''' + +# Control if we write debug information to syslog. +debug = False + +# Function used for executing commands. +def execute_command(cmd, module): + if debug: + syslog.syslog("execute_command(): cmd = %s" % cmd) + # Break command line into arguments. + # This makes run_command() use shell=False which we need to not cause shell + # expansion of special characters like '*'. + cmd_args = shlex.split(cmd) + return module.run_command(cmd_args) + +# Function used for getting the name of a currently installed package. +def get_current_name(name, pkg_spec, module): + info_cmd = 'pkg_info' + (rc, stdout, stderr) = execute_command("%s" % (info_cmd), module) + if rc != 0: + return (rc, stdout, stderr) + + if pkg_spec['version']: + pattern = "^%s" % name + elif pkg_spec['flavor']: + pattern = "^%s-.*-%s\s" % (pkg_spec['stem'], pkg_spec['flavor']) + else: + pattern = "^%s-" % pkg_spec['stem'] + + if debug: + syslog.syslog("get_current_name(): pattern = %s" % pattern) + + for line in stdout.splitlines(): + if debug: + syslog.syslog("get_current_name: line = %s" % line) + match = re.search(pattern, line) + if match: + current_name = line.split()[0] + + return current_name + +# Function used to find out if a package is currently installed. +def get_package_state(name, pkg_spec, module): + info_cmd = 'pkg_info -e' + + if pkg_spec['version']: + command = "%s %s" % (info_cmd, name) + elif pkg_spec['flavor']: + command = "%s %s-*-%s" % (info_cmd, pkg_spec['stem'], pkg_spec['flavor']) + else: + command = "%s %s-*" % (info_cmd, pkg_spec['stem']) + + rc, stdout, stderr = execute_command(command, module) + + if (stderr): + module.fail_json(msg="failed in get_package_state(): " + stderr) + + if rc == 0: + return True + else: + return False + +# Function used to make sure a package is present. 
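+# It shares a check mode convention with package_latest() and package_absent()
+# below: in check mode the pkg_add/pkg_delete command gets an extra 'n'
+# (dry run) flag and module.exit_json(changed=True) is called as soon as a
+# change is detected, so nothing is modified on the target host.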
+def package_present(name, installed_state, pkg_spec, module): + if module.check_mode: + install_cmd = 'pkg_add -Imn' + else: + install_cmd = 'pkg_add -Im' + + if installed_state is False: + + # Attempt to install the package + (rc, stdout, stderr) = execute_command("%s %s" % (install_cmd, name), module) + + # The behaviour of pkg_add is a bit different depending on if a + # specific version is supplied or not. + # + # When a specific version is supplied the return code will be 0 when + # a package is found and 1 when it is not, if a version is not + # supplied the tool will exit 0 in both cases: + if pkg_spec['version']: + # Depend on the return code. + if debug: + syslog.syslog("package_present(): depending on return code") + if rc: + changed=False + else: + # Depend on stderr instead. + if debug: + syslog.syslog("package_present(): depending on stderr") + if stderr: + # There is a corner case where having an empty directory in + # installpath prior to the right location will result in a + # "file:/local/package/directory/ is empty" message on stderr + # while still installing the package, so we need to look for + # for a message like "packagename-1.0: ok" just in case. + match = re.search("\W%s-[^:]+: ok\W" % name, stdout) + if match: + # It turns out we were able to install the package. + if debug: + syslog.syslog("package_present(): we were able to install package") + pass + else: + # We really did fail, fake the return code. + if debug: + syslog.syslog("package_present(): we really did fail") + rc = 1 + changed=False + else: + if debug: + syslog.syslog("package_present(): stderr was not set") + + if rc == 0: + if module.check_mode: + module.exit_json(changed=True) + + changed=True + + else: + rc = 0 + stdout = '' + stderr = '' + changed=False + + return (rc, stdout, stderr, changed) + +# Function used to make sure a package is the latest available version. +def package_latest(name, installed_state, pkg_spec, module): + if module.check_mode: + upgrade_cmd = 'pkg_add -umn' + else: + upgrade_cmd = 'pkg_add -um' + + pre_upgrade_name = '' + + if installed_state is True: + + # Fetch name of currently installed package. + pre_upgrade_name = get_current_name(name, pkg_spec, module) + + if debug: + syslog.syslog("package_latest(): pre_upgrade_name = %s" % pre_upgrade_name) + + # Attempt to upgrade the package. + (rc, stdout, stderr) = execute_command("%s %s" % (upgrade_cmd, name), module) + + # Look for output looking something like "nmap-6.01->6.25: ok" to see if + # something changed (or would have changed). Use \W to delimit the match + # from progress meter output. + match = re.search("\W%s->.+: ok\W" % pre_upgrade_name, stdout) + if match: + if module.check_mode: + module.exit_json(changed=True) + + changed = True + else: + changed = False + + # FIXME: This part is problematic. Based on the issues mentioned (and + # handled) in package_present() it is not safe to blindly trust stderr + # as an indicator that the command failed, and in the case with + # empty installpath directories this will break. + # + # For now keep this safeguard here, but ignore it if we managed to + # parse out a successful update above. This way we will report a + # successful run when we actually modify something but fail + # otherwise. + if changed != True: + if stderr: + rc=1 + + return (rc, stdout, stderr, changed) + + else: + # If package was not installed at all just make it present. 
+ if debug: + syslog.syslog("package_latest(): package is not installed, calling package_present()") + return package_present(name, installed_state, pkg_spec, module) + +# Function used to make sure a package is not installed. +def package_absent(name, installed_state, module): + if module.check_mode: + remove_cmd = 'pkg_delete -In' + else: + remove_cmd = 'pkg_delete -I' + + if installed_state is True: + + # Attempt to remove the package. + rc, stdout, stderr = execute_command("%s %s" % (remove_cmd, name), module) + + if rc == 0: + if module.check_mode: + module.exit_json(changed=True) + + changed=True + else: + changed=False + + else: + rc = 0 + stdout = '' + stderr = '' + changed=False + + return (rc, stdout, stderr, changed) + +# Function used to parse the package name based on packages-specs(7) +# The general name structure is "stem-version[-flavors]" +def parse_package_name(name, pkg_spec, module): + # Do some initial matches so we can base the more advanced regex on that. + version_match = re.search("-[0-9]", name) + versionless_match = re.search("--", name) + + # Stop if someone is giving us a name that both has a version and is + # version-less at the same time. + if version_match and versionless_match: + module.fail_json(msg="Package name both has a version and is version-less: " + name) + + # If name includes a version. + if version_match: + match = re.search("^(?P.*)-(?P[0-9][^-]*)(?P-)?(?P[a-z].*)?$", name) + if match: + pkg_spec['stem'] = match.group('stem') + pkg_spec['version_separator'] = '-' + pkg_spec['version'] = match.group('version') + pkg_spec['flavor_separator'] = match.group('flavor_separator') + pkg_spec['flavor'] = match.group('flavor') + else: + module.fail_json(msg="Unable to parse package name at version_match: " + name) + + # If name includes no version but is version-less ("--"). + elif versionless_match: + match = re.search("^(?P.*)--(?P[a-z].*)?$", name) + if match: + pkg_spec['stem'] = match.group('stem') + pkg_spec['version_separator'] = '-' + pkg_spec['version'] = None + pkg_spec['flavor_separator'] = '-' + pkg_spec['flavor'] = match.group('flavor') + else: + module.fail_json(msg="Unable to parse package name at versionless_match: " + name) + + # If name includes no version, and is not version-less, it is all a stem. + else: + match = re.search("^(?P.*)$", name) + if match: + pkg_spec['stem'] = match.group('stem') + pkg_spec['version_separator'] = None + pkg_spec['version'] = None + pkg_spec['flavor_separator'] = None + pkg_spec['flavor'] = None + else: + module.fail_json(msg="Unable to parse package name at else: " + name) + + # Sanity check that there are no trailing dashes in flavor. + # Try to stop strange stuff early so we can be strict later. + if pkg_spec['flavor']: + match = re.search("-$", pkg_spec['flavor']) + if match: + module.fail_json(msg="Trailing dash in flavor: " + pkg_spec['flavor']) + +# =========================================== +# Main control flow + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(required=True, choices=['absent', 'installed', 'latest', 'present', 'removed']), + ), + supports_check_mode = True + ) + + name = module.params['name'] + state = module.params['state'] + + rc = 0 + stdout = '' + stderr = '' + result = {} + result['name'] = name + result['state'] = state + + # Parse package name and put results in the pkg_spec dictionary. + pkg_spec = {} + parse_package_name(name, pkg_spec, module) + + # Get package state. 
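+    # The state query below is driven by the pkg_spec fields filled in above.
+    # A few illustrative (hypothetical) names and how they are parsed:
+    #   nmap-6.25   -> stem='nmap', version='6.25', flavor=None
+    #   vim--nox11  -> stem='vim',  version=None,   flavor='nox11'
+    #   nmap        -> stem='nmap', version=None,   flavor=None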
+ installed_state = get_package_state(name, pkg_spec, module) + + # Perform requested action. + if state in ['installed', 'present']: + (rc, stdout, stderr, changed) = package_present(name, installed_state, pkg_spec, module) + elif state in ['absent', 'removed']: + (rc, stdout, stderr, changed) = package_absent(name, installed_state, module) + elif state == 'latest': + (rc, stdout, stderr, changed) = package_latest(name, installed_state, pkg_spec, module) + + if rc != 0: + if stderr: + module.fail_json(msg=stderr) + else: + module.fail_json(msg=stdout) + + result['changed'] = changed + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/opkg b/lib/ansible/modules/extras/packaging/opkg new file mode 100644 index 00000000000..0187abe56a8 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/opkg @@ -0,0 +1,150 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Pelletier +# Based on pacman (Afterburn) and pkgin (Shaun Zinck) modules +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +DOCUMENTATION = ''' +--- +module: opkg +author: Patrick Pelletier +short_description: Package manager for OpenWrt +description: + - Manages OpenWrt packages +version_added: "1.1" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present + update_cache: + description: + - update the package db first + required: false + default: "no" + choices: [ "yes", "no" ] +notes: [] +''' +EXAMPLES = ''' +- opkg: name=foo state=present +- opkg: name=foo state=present update_cache=yes +- opkg: name=foo state=absent +- opkg: name=foo,bar state=absent +''' + +import pipes + +def update_package_db(module, opkg_path): + """ Updates packages list. """ + + rc, out, err = module.run_command("%s update" % opkg_path) + + if rc != 0: + module.fail_json(msg="could not update package db") + + +def query_package(module, opkg_path, name, state="present"): + """ Returns whether a package is installed or not. """ + + if state == "present": + + rc, out, err = module.run_command("%s list-installed | grep -q ^%s" % (pipes.quote(opkg_path), pipes.quote(name)), use_unsafe_shell=True) + if rc == 0: + return True + + return False + + +def remove_packages(module, opkg_path, packages): + """ Uninstalls one or more packages if installed. 
""" + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, opkg_path, package): + continue + + rc, out, err = module.run_command("%s remove %s" % (opkg_path, package)) + + if query_package(module, opkg_path, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, opkg_path, packages): + """ Installs one or more packages if not already installed. """ + + install_c = 0 + + for package in packages: + if query_package(module, opkg_path, package): + continue + + rc, out, err = module.run_command("%s install %s" % (opkg_path, package)) + + if not query_package(module, opkg_path, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=["pkg"], required=True), + state = dict(default="present", choices=["present", "installed", "absent", "removed"]), + update_cache = dict(default="no", aliases=["update-cache"], type='bool') + ) + ) + + opkg_path = module.get_bin_path('opkg', True, ['/bin']) + + p = module.params + + if p["update_cache"]: + update_package_db(module, opkg_path) + + pkgs = p["name"].split(",") + + if p["state"] in ["present", "installed"]: + install_packages(module, opkg_path, pkgs) + + elif p["state"] in ["absent", "removed"]: + remove_packages(module, opkg_path, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/pacman b/lib/ansible/modules/extras/packaging/pacman new file mode 100644 index 00000000000..0b23a2f93ce --- /dev/null +++ b/lib/ansible/modules/extras/packaging/pacman @@ -0,0 +1,234 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2012, Afterburn +# (c) 2013, Aaron Bull Schaefer +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: pacman +short_description: Manage packages with I(pacman) +description: + - Manage packages with the I(pacman) package manager, which is used by + Arch Linux and its variants. +version_added: "1.0" +author: Afterburn +notes: [] +requirements: [] +options: + name: + description: + - Name of the package to install, upgrade, or remove. + required: false + default: null + + state: + description: + - Desired state of the package. 
+ required: false + default: "present" + choices: ["present", "absent"] + + recurse: + description: + - When removing a package, also remove its dependencies, provided + that they are not required by other packages and were not + explicitly installed by a user. + required: false + default: "no" + choices: ["yes", "no"] + version_added: "1.3" + + update_cache: + description: + - Whether or not to refresh the master package lists. This can be + run as part of a package installation or as a separate step. + required: false + default: "no" + choices: ["yes", "no"] +''' + +EXAMPLES = ''' +# Install package foo +- pacman: name=foo state=present + +# Remove packages foo and bar +- pacman: name=foo,bar state=absent + +# Recursively remove package baz +- pacman: name=baz state=absent recurse=yes + +# Run the equivalent of "pacman -Syy" as a separate step +- pacman: update_cache=yes +''' + +import json +import shlex +import os +import re +import sys + +PACMAN_PATH = "/usr/bin/pacman" + +def query_package(module, name, state="present"): + # pacman -Q returns 0 if the package is installed, + # 1 if it is not installed + if state == "present": + cmd = "pacman -Q %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc == 0: + return True + + return False + + +def update_package_db(module): + cmd = "pacman -Syy" + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc == 0: + return True + else: + module.fail_json(msg="could not update package db") + + +def remove_packages(module, packages): + if module.params["recurse"]: + args = "Rs" + else: + args = "R" + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + cmd = "pacman -%s %s --noconfirm" % (args, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, packages, package_files): + install_c = 0 + + for i, package in enumerate(packages): + if query_package(module, package): + continue + + if package_files[i]: + params = '-U %s' % package_files[i] + else: + params = '-S %s' % package + + cmd = "pacman %s --noconfirm" % (params) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to install %s" % (package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="installed %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already installed") + + +def check_packages(module, packages, state): + would_be_changed = [] + for package in packages: + installed = query_package(module, package) + if ((state == "present" and not installed) or + (state == "absent" and installed)): + would_be_changed.append(package) + if would_be_changed: + if state == "absent": + state = "removed" + module.exit_json(changed=True, msg="%s package(s) would be %s" % ( + len(would_be_changed), state)) + else: + module.exit_json(change=False, msg="package(s) already %s" % state) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=['pkg']), + state = dict(default='present', choices=['present', 'installed', 'absent', 
'removed']), + recurse = dict(default='no', choices=BOOLEANS, type='bool'), + update_cache = dict(default='no', aliases=['update-cache'], choices=BOOLEANS, type='bool')), + required_one_of = [['name', 'update_cache']], + supports_check_mode = True) + + if not os.path.exists(PACMAN_PATH): + module.fail_json(msg="cannot find pacman, looking for %s" % (PACMAN_PATH)) + + p = module.params + + # normalize the state parameter + if p['state'] in ['present', 'installed']: + p['state'] = 'present' + elif p['state'] in ['absent', 'removed']: + p['state'] = 'absent' + + if p["update_cache"] and not module.check_mode: + update_package_db(module) + if not p['name']: + module.exit_json(changed=True, msg='updated the package master lists') + + if p['update_cache'] and module.check_mode and not p['name']: + module.exit_json(changed=True, msg='Would have updated the package cache') + + if p['name']: + pkgs = p['name'].split(',') + + pkg_files = [] + for i, pkg in enumerate(pkgs): + if pkg.endswith('.pkg.tar.xz'): + # The package given is a filename, extract the raw pkg name from + # it and store the filename + pkg_files.append(pkg) + pkgs[i] = re.sub('-[0-9].*$', '', pkgs[i].split('/')[-1]) + else: + pkg_files.append(None) + + if module.check_mode: + check_packages(module, pkgs, p['state']) + + if p['state'] == 'present': + install_packages(module, pkgs, pkg_files) + elif p['state'] == 'absent': + remove_packages(module, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/pkgin b/lib/ansible/modules/extras/packaging/pkgin new file mode 100755 index 00000000000..866c9f76a4c --- /dev/null +++ b/lib/ansible/modules/extras/packaging/pkgin @@ -0,0 +1,168 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Shaun Zinck +# Written by Shaun Zinck +# Based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . 
+ + +DOCUMENTATION = ''' +--- +module: pkgin +short_description: Package manager for SmartOS +description: + - Manages SmartOS packages +version_added: "1.0" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present +author: Shaun Zinck +notes: [] +''' + +EXAMPLES = ''' +# install package foo" +- pkgin: name=foo state=present + +# remove package foo +- pkgin: name=foo state=absent + +# remove packages foo and bar +- pkgin: name=foo,bar state=absent +''' + + +import json +import shlex +import os +import sys +import pipes + +def query_package(module, pkgin_path, name, state="present"): + + if state == "present": + + rc, out, err = module.run_command("%s -y list | grep ^%s" % (pipes.quote(pkgin_path), pipes.quote(name)), use_unsafe_shell=True) + + if rc == 0: + # At least one package with a package name that starts with ``name`` + # is installed. For some cases this is not sufficient to determine + # wether the queried package is installed. + # + # E.g. for ``name='gcc47'``, ``gcc47`` not being installed, but + # ``gcc47-libs`` being installed, ``out`` would be: + # + # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. + # + # Multiline output is also possible, for example with the same query + # and bot ``gcc47`` and ``gcc47-libs`` being installed: + # + # gcc47-libs-4.7.2nb4 The GNU Compiler Collection (GCC) support shared libraries. + # gcc47-4.7.2nb3 The GNU Compiler Collection (GCC) - 4.7 Release Series + + # Loop over lines in ``out`` + for line in out.split('\n'): + + # Strip description + # (results in sth. like 'gcc47-libs-4.7.2nb4') + pkgname_with_version = out.split(' ')[0] + + # Strip version + # (results in sth like 'gcc47-libs') + pkgname_without_version = '-'.join(pkgname_with_version.split('-')[:-1]) + + if name == pkgname_without_version: + return True + + return False + + +def remove_packages(module, pkgin_path, packages): + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgin_path, package): + continue + + rc, out, err = module.run_command("%s -y remove %s" % (pkgin_path, package)) + + if query_package(module, pkgin_path, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pkgin_path, packages): + + install_c = 0 + + for package in packages: + if query_package(module, pkgin_path, package): + continue + + rc, out, err = module.run_command("%s -y install %s" % (pkgin_path, package)) + + if not query_package(module, pkgin_path, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default="present", choices=["present","absent"]), + name = dict(aliases=["pkg"], required=True))) + + pkgin_path = module.get_bin_path('pkgin', True, ['/opt/local/bin']) + + p = module.params + + pkgs = p["name"].split(",") + + if 
p["state"] == "present": + install_packages(module, pkgin_path, pkgs) + + elif p["state"] == "absent": + remove_packages(module, pkgin_path, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/pkgng b/lib/ansible/modules/extras/packaging/pkgng new file mode 100644 index 00000000000..a1f443fd4e1 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/pkgng @@ -0,0 +1,301 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, bleader +# Written by bleader +# Based on pkgin module written by Shaun Zinck +# that was based on pacman module written by Afterburn +# that was based on apt module written by Matthew Williams +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: pkgng +short_description: Package manager for FreeBSD >= 9.0 +description: + - Manage binary packages for FreeBSD using 'pkgng' which + is available in versions after 9.0. +version_added: "1.2" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present + cached: + description: + - use local package base or try to fetch an updated one + choices: [ 'yes', 'no' ] + required: false + default: no + annotation: + description: + - a comma-separated list of keyvalue-pairs of the form + <+/-/:>[=]. A '+' denotes adding an annotation, a + '-' denotes removing an annotation, and ':' denotes modifying an + annotation. + If setting or modifying annotations, a value must be provided. + required: false + version_added: "1.6" + pkgsite: + description: + - for pkgng versions before 1.1.4, specify packagesite to use + for downloading packages, if not specified, use settings from + /usr/local/etc/pkg.conf + for newer pkgng versions, specify a the name of a repository + configured in /usr/local/etc/pkg/repos + required: false +author: bleader +notes: + - When using pkgsite, be careful that already in cache packages won't be downloaded again. 
+''' + +EXAMPLES = ''' +# Install package foo +- pkgng: name=foo state=present + +# Annotate package foo and bar +- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar + +# Remove packages foo and bar +- pkgng: name=foo,bar state=absent +''' + + +import json +import shlex +import os +import re +import sys + +def query_package(module, pkgng_path, name): + + rc, out, err = module.run_command("%s info -g -e %s" % (pkgng_path, name)) + + if rc == 0: + return True + + return False + +def pkgng_older_than(module, pkgng_path, compare_version): + + rc, out, err = module.run_command("%s -v" % pkgng_path) + version = map(lambda x: int(x), re.split(r'[\._]', out)) + + i = 0 + new_pkgng = True + while compare_version[i] == version[i]: + i += 1 + if i == min(len(compare_version), len(version)): + break + else: + if compare_version[i] > version[i]: + new_pkgng = False + return not new_pkgng + + +def remove_packages(module, pkgng_path, packages): + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, pkgng_path, package): + continue + + if not module.check_mode: + rc, out, err = module.run_command("%s delete -y %s" % (pkgng_path, package)) + + if not module.check_mode and query_package(module, pkgng_path, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + return (True, "removed %s package(s)" % remove_c) + + return (False, "package(s) already absent") + + +def install_packages(module, pkgng_path, packages, cached, pkgsite): + + install_c = 0 + + # as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions + # in /usr/local/etc/pkg/repos + old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4]) + if pkgsite != "": + if old_pkgng: + pkgsite = "PACKAGESITE=%s" % (pkgsite) + else: + pkgsite = "-r %s" % (pkgsite) + + if not module.check_mode and not cached: + if old_pkgng: + rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path)) + else: + rc, out, err = module.run_command("%s update" % (pkgng_path)) + if rc != 0: + module.fail_json(msg="Could not update catalogue") + + for package in packages: + if query_package(module, pkgng_path, package): + continue + + if not module.check_mode: + if old_pkgng: + rc, out, err = module.run_command("%s %s install -g -U -y %s" % (pkgsite, pkgng_path, package)) + else: + rc, out, err = module.run_command("%s install %s -g -U -y %s" % (pkgng_path, pkgsite, package)) + + if not module.check_mode and not query_package(module, pkgng_path, package): + module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err) + + install_c += 1 + + if install_c > 0: + return (True, "added %s package(s)" % (install_c)) + + return (False, "package(s) already present") + +def annotation_query(module, pkgng_path, package, tag): + rc, out, err = module.run_command("%s info -g -A %s" % (pkgng_path, package)) + match = re.search(r'^\s*(?P%s)\s*:\s*(?P\w+)' % tag, out, flags=re.MULTILINE) + if match: + return match.group('value') + return False + + +def annotation_add(module, pkgng_path, package, tag, value): + _value = annotation_query(module, pkgng_path, package, tag) + if not _value: + # Annotation does not exist, add it. 
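+        # With hypothetical values package=foo, tag=test1, value=baz this runs
+        # something like: pkg annotate -y -A foo test1 "baz"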
+ rc, out, err = module.run_command('%s annotate -y -A %s %s "%s"' + % (pkgng_path, package, tag, value)) + if rc != 0: + module.fail_json("could not annotate %s: %s" + % (package, out), stderr=err) + return True + elif _value != value: + # Annotation exists, but value differs + module.fail_json( + mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s" + % (package, tag, _value, value)) + return False + else: + # Annotation exists, nothing to do + return False + +def annotation_delete(module, pkgng_path, package, tag, value): + _value = annotation_query(module, pkgng_path, package, tag) + if _value: + rc, out, err = module.run_command('%s annotate -y -D %s %s' + % (pkgng_path, package, tag)) + if rc != 0: + module.fail_json("could not delete annotation to %s: %s" + % (package, out), stderr=err) + return True + return False + +def annotation_modify(module, pkgng_path, package, tag, value): + _value = annotation_query(module, pkgng_path, package, tag) + if not value: + # No such tag + module.fail_json("could not change annotation to %s: tag %s does not exist" + % (package, tag)) + elif _value == value: + # No change in value + return False + else: + rc,out,err = module.run_command('%s annotate -y -M %s %s "%s"' + % (pkgng_path, package, tag, value)) + if rc != 0: + module.fail_json("could not change annotation annotation to %s: %s" + % (package, out), stderr=err) + return True + + +def annotate_packages(module, pkgng_path, packages, annotation): + annotate_c = 0 + annotations = map(lambda _annotation: + re.match(r'(?P[\+-:])(?P\w+)(=(?P\w+))?', + _annotation).groupdict(), + re.split(r',', annotation)) + + operation = { + '+': annotation_add, + '-': annotation_delete, + ':': annotation_modify + } + + for package in packages: + for _annotation in annotations: + annotate_c += ( 1 if operation[_annotation['operation']]( + module, pkgng_path, package, + _annotation['tag'], _annotation['value']) else 0 ) + + if annotate_c > 0: + return (True, "added %s annotations." 
% annotate_c) + return (False, "changed no annotations") + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default="present", choices=["present","absent"], required=False), + name = dict(aliases=["pkg"], required=True), + cached = dict(default=False, type='bool'), + annotation = dict(default="", required=False), + pkgsite = dict(default="", required=False)), + supports_check_mode = True) + + pkgng_path = module.get_bin_path('pkg', True) + + p = module.params + + pkgs = p["name"].split(",") + + changed = False + msgs = [] + + if p["state"] == "present": + _changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"]) + changed = changed or _changed + msgs.append(_msg) + + elif p["state"] == "absent": + _changed, _msg = remove_packages(module, pkgng_path, pkgs) + changed = changed or _changed + msgs.append(_msg) + + if p["annotation"]: + _changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"]) + changed = changed or _changed + msgs.append(_msg) + + module.exit_json(changed=changed, msg=", ".join(msgs)) + + + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/pkgutil b/lib/ansible/modules/extras/packaging/pkgutil new file mode 100644 index 00000000000..78a7db72bf5 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/pkgutil @@ -0,0 +1,179 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Winkler +# based on svr4pkg by +# Boyd Adamson (2012) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: pkgutil +short_description: Manage CSW-Packages on Solaris +description: + - Manages CSW packages (SVR4 format) on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available + as a legacy feature in Solaris 11. + - Pkgutil is an advanced packaging system, which resolves dependency on installation. + It is designed for CSW packages. +version_added: "1.3" +author: Alexander Winkler +options: + name: + description: + - Package name, e.g. (C(CSWnrpe)) + required: true + site: + description: + - Specifies the repository path to install the package from. + - Its global definition is done in C(/etc/opt/csw/pkgutil.conf). + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + - The upgrade (C(latest)) operation will update/install the package to the latest version available. + - "Note: The module has a limitation that (C(latest)) only works for one package, not lists of them." 
+ required: true + choices: ["present", "absent", "latest"] +''' + +EXAMPLES = ''' +# Install a package +pkgutil: name=CSWcommon state=present + +# Install a package from a specific repository +pkgutil: name=CSWnrpe site='ftp://myinternal.repo/opencsw/kiel state=latest' +''' + +import os +import pipes + +def package_installed(module, name): + cmd = [module.get_bin_path('pkginfo', True)] + cmd.append('-q') + cmd.append(name) + rc, out, err = module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + +def package_latest(module, name, site): + # Only supports one package + cmd = [ 'pkgutil', '--single', '-c' ] + if site is not None: + cmd += [ '-t', pipes.quote(site) ] + cmd.append(pipes.quote(name)) + cmd += [ '| tail -1 | grep -v SAME' ] + rc, out, err = module.run_command(' '.join(cmd), use_unsafe_shell=True) + if rc == 1: + return True + else: + return False + +def run_command(module, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + +def package_install(module, state, name, site): + cmd = [ 'pkgutil', '-iy' ] + if site is not None: + cmd += [ '-t', site ] + if state == 'latest': + cmd += [ '-f' ] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + +def package_upgrade(module, name, site): + cmd = [ 'pkgutil', '-ufy' ] + if site is not None: + cmd += [ '-t', site ] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + +def package_uninstall(module, name): + cmd = [ 'pkgutil', '-ry', name] + (rc, out, err) = run_command(module, cmd) + return (rc, out, err) + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required = True), + state = dict(required = True, choices=['present', 'absent','latest']), + site = dict(default = None), + ), + supports_check_mode=True + ) + name = module.params['name'] + state = module.params['state'] + site = module.params['site'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if not package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, state, name, site) + # Stdout is normally empty but for some packages can be + # very long and is not often useful + if len(out) > 75: + out = out[:75] + '...' + + elif state == 'latest': + if not package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, state, name, site) + else: + if not package_latest(module, name, site): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_upgrade(module, name, site) + if len(out) > 75: + out = out[:75] + '...' 
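+    # Removal below follows the same pattern: pkgutil is only invoked when the
+    # package is actually installed, and check mode exits early with changed=True.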
+ + elif state == 'absent': + if package_installed(module, name): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name) + out = out[:75] + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/portage b/lib/ansible/modules/extras/packaging/portage new file mode 100644 index 00000000000..85027bfc79b --- /dev/null +++ b/lib/ansible/modules/extras/packaging/portage @@ -0,0 +1,405 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Yap Sok Ann +# Written by Yap Sok Ann +# Based on apt module written by Matthew Williams +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: portage +short_description: Package manager for Gentoo +description: + - Manages Gentoo packages +version_added: "1.6" + +options: + package: + description: + - Package atom or set, e.g. C(sys-apps/foo) or C(>foo-2.13) or C(@world) + required: false + default: null + + state: + description: + - State of the package atom + required: false + default: "present" + choices: [ "present", "installed", "emerged", "absent", "removed", "unmerged" ] + + update: + description: + - Update packages to the best version available (--update) + required: false + default: null + choices: [ "yes" ] + + deep: + description: + - Consider the entire dependency tree of packages (--deep) + required: false + default: null + choices: [ "yes" ] + + newuse: + description: + - Include installed packages where USE flags have changed (--newuse) + required: false + default: null + choices: [ "yes" ] + + changed_use: + description: + - Include installed packages where USE flags have changed, except when + - flags that the user has not enabled are added or removed + - (--changed-use) + required: false + default: null + choices: [ "yes" ] + version_added: 1.8 + + oneshot: + description: + - Do not add the packages to the world file (--oneshot) + required: false + default: null + choices: [ "yes" ] + + noreplace: + description: + - Do not re-emerge installed packages (--noreplace) + required: false + default: null + choices: [ "yes" ] + + nodeps: + description: + - Only merge packages but not their dependencies (--nodeps) + required: false + default: null + choices: [ "yes" ] + + onlydeps: + description: + - Only merge packages' dependencies but not the packages (--onlydeps) + required: false + default: null + choices: [ "yes" ] + + depclean: + description: + - Remove packages not needed by explicitly merged packages (--depclean) + - If no package is specified, clean up the world's dependencies + - Otherwise, --depclean serves as a dependency aware version of --unmerge + required: false + default: null + choices: [ "yes" ] + + quiet: + description: + - Run 
emerge in quiet mode (--quiet) + required: false + default: null + choices: [ "yes" ] + + verbose: + description: + - Run emerge in verbose mode (--verbose) + required: false + default: null + choices: [ "yes" ] + + sync: + description: + - Sync package repositories first + - If yes, perform "emerge --sync" + - If web, perform "emerge-webrsync" + required: false + default: null + choices: [ "yes", "web" ] + +requirements: [ gentoolkit ] +author: Yap Sok Ann +notes: [] +''' + +EXAMPLES = ''' +# Make sure package foo is installed +- portage: package=foo state=present + +# Make sure package foo is not installed +- portage: package=foo state=absent + +# Update package foo to the "best" version +- portage: package=foo update=yes + +# Sync repositories and update world +- portage: package=@world update=yes deep=yes sync=yes + +# Remove unneeded packages +- portage: depclean=yes + +# Remove package foo if it is not explicitly needed +- portage: package=foo state=absent depclean=yes +''' + + +import os +import pipes + + +def query_package(module, package, action): + if package.startswith('@'): + return query_set(module, package, action) + return query_atom(module, package, action) + + +def query_atom(module, atom, action): + cmd = '%s list %s' % (module.equery_path, atom) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def query_set(module, set, action): + system_sets = [ + '@live-rebuild', + '@module-rebuild', + '@preserved-rebuild', + '@security', + '@selected', + '@system', + '@world', + '@x11-module-rebuild', + ] + + if set in system_sets: + if action == 'unmerge': + module.fail_json(msg='set %s cannot be removed' % set) + return False + + world_sets_path = '/var/lib/portage/world_sets' + if not os.path.exists(world_sets_path): + return False + + cmd = 'grep %s %s' % (set, world_sets_path) + + rc, out, err = module.run_command(cmd) + return rc == 0 + + +def sync_repositories(module, webrsync=False): + if module.check_mode: + module.exit_json(msg='check mode not supported by sync') + + if webrsync: + webrsync_path = module.get_bin_path('emerge-webrsync', required=True) + cmd = '%s --quiet' % webrsync_path + else: + cmd = '%s --sync --quiet' % module.emerge_path + + rc, out, err = module.run_command(cmd) + if rc != 0: + module.fail_json(msg='could not sync package repositories') + + +# Note: In the 3 functions below, equery is done one-by-one, but emerge is done +# in one go. If that is not desirable, split the packages into multiple tasks +# instead of joining them together with comma. 
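+# A hypothetical illustration of that alternative: instead of
+#
+#   - portage: package=foo,bar update=yes
+#
+# use one task per atom, or a loop, e.g.
+#
+#   - portage: package={{ item }} update=yes
+#     with_items:
+#       - foo
+#       - bar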
+ + +def emerge_packages(module, packages): + p = module.params + + if not (p['update'] or p['noreplace']): + for package in packages: + if not query_package(module, package, 'emerge'): + break + else: + module.exit_json(changed=False, msg='Packages already present.') + + args = [] + emerge_flags = { + 'update': '--update', + 'deep': '--deep', + 'newuse': '--newuse', + 'changed_use': '--changed-use', + 'oneshot': '--oneshot', + 'noreplace': '--noreplace', + 'nodeps': '--nodeps', + 'onlydeps': '--onlydeps', + 'quiet': '--quiet', + 'verbose': '--verbose', + } + for flag, arg in emerge_flags.iteritems(): + if p[flag]: + args.append(arg) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not installed.', + ) + + changed = True + for line in out.splitlines(): + if line.startswith('>>> Emerging (1 of'): + break + else: + changed = False + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages installed.', + ) + + +def unmerge_packages(module, packages): + p = module.params + + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--unmerge'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + + if rc != 0: + module.fail_json( + cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages not removed.', + ) + + module.exit_json( + changed=True, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Packages removed.', + ) + + +def cleanup_packages(module, packages): + p = module.params + + if packages: + for package in packages: + if query_package(module, package, 'unmerge'): + break + else: + module.exit_json(changed=False, msg='Packages already absent.') + + args = ['--depclean'] + + for flag in ['quiet', 'verbose']: + if p[flag]: + args.append('--%s' % flag) + + cmd, (rc, out, err) = run_emerge(module, packages, *args) + if rc != 0: + module.fail_json(cmd=cmd, rc=rc, stdout=out, stderr=err) + + removed = 0 + for line in out.splitlines(): + if not line.startswith('Number removed:'): + continue + parts = line.split(':') + removed = int(parts[1].strip()) + changed = removed > 0 + + module.exit_json( + changed=changed, cmd=cmd, rc=rc, stdout=out, stderr=err, + msg='Depclean completed.', + ) + + +def run_emerge(module, packages, *args): + args = list(args) + + if module.check_mode: + args.append('--pretend') + + cmd = [module.emerge_path] + args + packages + return cmd, module.run_command(cmd) + + +portage_present_states = ['present', 'emerged', 'installed'] +portage_absent_states = ['absent', 'unmerged', 'removed'] + + +def main(): + module = AnsibleModule( + argument_spec=dict( + package=dict(default=None, aliases=['name']), + state=dict( + default=portage_present_states[0], + choices=portage_present_states + portage_absent_states, + ), + update=dict(default=None, choices=['yes']), + deep=dict(default=None, choices=['yes']), + newuse=dict(default=None, choices=['yes']), + changed_use=dict(default=None, choices=['yes']), + oneshot=dict(default=None, choices=['yes']), + noreplace=dict(default=None, choices=['yes']), + nodeps=dict(default=None, choices=['yes']), + onlydeps=dict(default=None, choices=['yes']), + depclean=dict(default=None, choices=['yes']), + quiet=dict(default=None, choices=['yes']), + verbose=dict(default=None, choices=['yes']), + 
sync=dict(default=None, choices=['yes', 'web']), + ), + required_one_of=[['package', 'sync', 'depclean']], + mutually_exclusive=[['nodeps', 'onlydeps'], ['quiet', 'verbose']], + supports_check_mode=True, + ) + + module.emerge_path = module.get_bin_path('emerge', required=True) + module.equery_path = module.get_bin_path('equery', required=True) + + p = module.params + + if p['sync']: + sync_repositories(module, webrsync=(p['sync'] == 'web')) + if not p['package']: + module.exit_json(msg='Sync successfully finished.') + + packages = p['package'].split(',') if p['package'] else [] + + if p['depclean']: + if packages and p['state'] not in portage_absent_states: + module.fail_json( + msg='Depclean can only be used with package when the state is ' + 'one of: %s' % portage_absent_states, + ) + + cleanup_packages(module, packages) + + elif p['state'] in portage_present_states: + emerge_packages(module, packages) + + elif p['state'] in portage_absent_states: + unmerge_packages(module, packages) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/portinstall b/lib/ansible/modules/extras/packaging/portinstall new file mode 100644 index 00000000000..068f413af72 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/portinstall @@ -0,0 +1,207 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, berenddeboer +# Written by berenddeboer +# Based on pkgng module written by bleader +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: portinstall +short_description: Installing packages from FreeBSD's ports system +description: + - Manage packages for FreeBSD using 'portinstall'. 
+version_added: "1.3" +options: + name: + description: + - name of package to install/remove + required: true + state: + description: + - state of the package + choices: [ 'present', 'absent' ] + required: false + default: present + use_packages: + description: + - use packages instead of ports whenever available + choices: [ 'yes', 'no' ] + required: false + default: yes +author: berenddeboer +''' + +EXAMPLES = ''' +# Install package foo +- portinstall: name=foo state=present + +# Install package security/cyrus-sasl2-saslauthd +- portinstall: name=security/cyrus-sasl2-saslauthd state=present + +# Remove packages foo and bar +- portinstall: name=foo,bar state=absent +''' + + +import json +import shlex +import os +import sys + +def query_package(module, name): + + pkg_info_path = module.get_bin_path('pkg_info', False) + + # Assume that if we have pkg_info, we haven't upgraded to pkgng + if pkg_info_path: + pkgng = False + pkg_glob_path = module.get_bin_path('pkg_glob', True) + rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True) + else: + pkgng = True + pkg_info_path = module.get_bin_path('pkg', True) + pkg_info_path = pkg_info_path + " info" + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name)) + + found = rc == 0 + + if not found: + # databases/mysql55-client installs as mysql-client, so try solving + # that the ugly way. Pity FreeBSD doesn't have a fool proof way of checking + # some package is installed + name_without_digits = re.sub('[0-9]', '', name) + if name != name_without_digits: + if pkgng: + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) + else: + rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits)) + + found = rc == 0 + + return found + + +def matching_packages(module, name): + + ports_glob_path = module.get_bin_path('ports_glob', True) + rc, out, err = module.run_command("%s %s" % (ports_glob_path, name)) + #counts the numer of packages found + occurrences = out.count('\n') + if occurrences == 0: + name_without_digits = re.sub('[0-9]', '', name) + if name != name_without_digits: + rc, out, err = module.run_command("%s %s" % (ports_glob_path, name_without_digits)) + occurrences = out.count('\n') + return occurrences + + +def remove_packages(module, packages): + + remove_c = 0 + pkg_glob_path = module.get_bin_path('pkg_glob', True) + + # If pkg_delete not found, we assume pkgng + pkg_delete_path = module.get_bin_path('pkg_delete', False) + if not pkg_delete_path: + pkg_delete_path = module.get_bin_path('pkg', True) + pkg_delete_path = pkg_delete_path + " delete -y" + + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True) + + if query_package(module, package): + name_without_digits = re.sub('[0-9]', '', package) + rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True) + if query_package(module, package): + module.fail_json(msg="failed to remove %s: %s" % (package, out)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def 
install_packages(module, packages, use_packages): + + install_c = 0 + + # If portinstall not found, automagically install + portinstall_path = module.get_bin_path('portinstall', False) + if not portinstall_path: + pkg_path = module.get_bin_path('pkg', False) + if pkg_path: + module.run_command("pkg install -y portupgrade") + portinstall_path = module.get_bin_path('portinstall', True) + + if use_packages == "yes": + portinstall_params="--use-packages" + else: + portinstall_params="" + + for package in packages: + if query_package(module, package): + continue + + # TODO: check how many match + matches = matching_packages(module, package) + if matches == 1: + rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package)) + if not query_package(module, package): + module.fail_json(msg="failed to install %s: %s" % (package, out)) + elif matches == 0: + module.fail_json(msg="no matches for package %s" % (package)) + else: + module.fail_json(msg="%s matches found for package name %s" % (matches, package)) + + install_c += 1 + + if install_c > 0: + module.exit_json(changed=True, msg="present %s package(s)" % (install_c)) + + module.exit_json(changed=False, msg="package(s) already present") + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default="present", choices=["present","absent"]), + name = dict(aliases=["pkg"], required=True), + use_packages = dict(type='bool', default='yes'))) + + p = module.params + + pkgs = p["name"].split(",") + + if p["state"] == "present": + install_packages(module, pkgs, p["use_packages"]) + + elif p["state"] == "absent": + remove_packages(module, pkgs) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/svr4pkg b/lib/ansible/modules/extras/packaging/svr4pkg new file mode 100644 index 00000000000..e95d4d8643f --- /dev/null +++ b/lib/ansible/modules/extras/packaging/svr4pkg @@ -0,0 +1,234 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Boyd Adamson +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: svr4pkg +short_description: Manage Solaris SVR4 packages +description: + - Manages SVR4 packages on Solaris 10 and 11. + - These were the native packages on Solaris <= 10 and are available + as a legacy feature in Solaris 11. + - Note that this is a very basic packaging system. It will not enforce + dependencies on install or remove. +version_added: "0.9" +author: Boyd Adamson +options: + name: + description: + - Package name, e.g. C(SUNWcsr) + required: true + + state: + description: + - Whether to install (C(present)), or remove (C(absent)) a package. + - If the package is to be installed, then I(src) is required. + - The SVR4 package system doesn't provide an upgrade operation. You need to uninstall the old, then install the new package. 
+ required: true + choices: ["present", "absent"] + + src: + description: + - Specifies the location to install the package from. Required when C(state=present). + - "Can be any path acceptable to the C(pkgadd) command's C(-d) option. e.g.: C(somefile.pkg), C(/dir/with/pkgs), C(http:/server/mypkgs.pkg)." + - If using a file or directory, they must already be accessible by the host. See the M(copy) module for a way to get them there. + proxy: + description: + - HTTP[s] proxy to be used if C(src) is a URL. + response_file: + description: + - Specifies the location of a response file to be used if package expects input on install. (added in Ansible 1.4) + required: false + zone: + description: + - Whether to install the package only in the current zone, or install it into all zones. + - The installation into all zones works only if you are working with the global zone. + required: false + default: "all" + choices: ["current", "all"] + version_added: "1.6" + category: + description: + - Install/Remove category instead of a single package. + required: false + choices: ["true", "false"] + version_added: "1.6" +''' + +EXAMPLES = ''' +# Install a package from an already copied file +- svr4pkg: name=CSWcommon src=/tmp/cswpkgs.pkg state=present + +# Install a package directly from an http site +- svr4pkg: name=CSWpkgutil src=http://get.opencsw.org/now state=present zone=current + +# Install a package with a response file +- svr4pkg: name=CSWggrep src=/tmp/third-party.pkg response_file=/tmp/ggrep.response state=present + +# Ensure that a package is not installed. +- svr4pkg: name=SUNWgnome-sound-recorder state=absent + +# Ensure that a category is not installed. +- svr4pkg: name=FIREFOX state=absent category=true +''' + + +import os +import tempfile + +def package_installed(module, name, category): + cmd = [module.get_bin_path('pkginfo', True)] + cmd.append('-q') + if category: + cmd.append('-c') + cmd.append(name) + rc, out, err = module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + +def create_admin_file(): + (desc, filename) = tempfile.mkstemp(prefix='ansible_svr4pkg', text=True) + fullauto = ''' +mail= +instance=unique +partial=nocheck +runlevel=quit +idepend=nocheck +rdepend=nocheck +space=quit +setuid=nocheck +conflict=nocheck +action=nocheck +networktimeout=60 +networkretries=3 +authentication=quit +keystore=/var/sadm/security +proxy= +basedir=default +''' + os.write(desc, fullauto) + os.close(desc) + return filename + +def run_command(module, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + +def package_install(module, name, src, proxy, response_file, zone, category): + adminfile = create_admin_file() + cmd = [ 'pkgadd', '-n'] + if zone == 'current': + cmd += [ '-G' ] + cmd += [ '-a', adminfile, '-d', src ] + if proxy is not None: + cmd += [ '-x', proxy ] + if response_file is not None: + cmd += [ '-r', response_file ] + if category: + cmd += [ '-Y' ] + cmd.append(name) + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + +def package_uninstall(module, name, src, category): + adminfile = create_admin_file() + if category: + cmd = [ 'pkgrm', '-na', adminfile, '-Y', name ] + else: + cmd = [ 'pkgrm', '-na', adminfile, name] + (rc, out, err) = run_command(module, cmd) + os.unlink(adminfile) + return (rc, out, err) + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required = True), + state = dict(required = True, choices=['present', 
'absent']), + src = dict(default = None), + proxy = dict(default = None), + response_file = dict(default = None), + zone = dict(required=False, default = 'all', choices=['current','all']), + category = dict(default=False, type='bool') + ), + supports_check_mode=True + ) + state = module.params['state'] + name = module.params['name'] + src = module.params['src'] + proxy = module.params['proxy'] + response_file = module.params['response_file'] + zone = module.params['zone'] + category = module.params['category'] + rc = None + out = '' + err = '' + result = {} + result['name'] = name + result['state'] = state + + if state == 'present': + if src is None: + module.fail_json(name=name, + msg="src is required when state=present") + if not package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_install(module, name, src, proxy, response_file, zone, category) + # Stdout is normally empty but for some packages can be + # very long and is not often useful + if len(out) > 75: + out = out[:75] + '...' + + elif state == 'absent': + if package_installed(module, name, category): + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = package_uninstall(module, name, src, category) + out = out[:75] + + # Success, Warning, Interruption, Reboot all, Reboot this return codes + if rc in (0, 2, 3, 10, 20): + result['changed'] = True + # no install nor uninstall, or failed + else: + result['changed'] = False + + # Fatal error, Administration, Administration Interaction return codes + if rc in (1, 4 , 5): + result['failed'] = True + else: + result['failed'] = False + + if out: + result['stdout'] = out + if err: + result['stderr'] = err + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/swdepot b/lib/ansible/modules/extras/packaging/swdepot new file mode 100644 index 00000000000..b41a860531f --- /dev/null +++ b/lib/ansible/modules/extras/packaging/swdepot @@ -0,0 +1,196 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2013, Raul Melo +# Written by Raul Melo +# Based on yum module written by Seth Vidal +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + +import re +import pipes + +DOCUMENTATION = ''' +--- +module: swdepot +short_description: Manage packages with swdepot package manager (HP-UX) +description: + - Will install, upgrade and remove packages with swdepot package manager (HP-UX) +version_added: "1.4" +notes: [] +author: Raul Melo +options: + name: + description: + - package name. + required: true + default: null + choices: [] + aliases: [] + version_added: 1.4 + state: + description: + - whether to install (C(present), C(latest)), or remove (C(absent)) a package. 
+ required: true + default: null + choices: [ 'present', 'latest', 'absent'] + aliases: [] + version_added: 1.4 + depot: + description: + - The source repository from which install or upgrade a package. + required: false + default: null + choices: [] + aliases: [] + version_added: 1.4 +''' + +EXAMPLES = ''' +- swdepot: name=unzip-6.0 state=installed depot=repository:/path +- swdepot: name=unzip state=latest depot=repository:/path +- swdepot: name=unzip state=absent +''' + +def compare_package(version1, version2): + """ Compare version packages. + Return values: + -1 first minor + 0 equal + 1 fisrt greater """ + + def normalize(v): + return [int(x) for x in re.sub(r'(\.0+)*$', '', v).split(".")] + return cmp(normalize(version1), normalize(version2)) + +def query_package(module, name, depot=None): + """ Returns whether a package is installed or not and version. """ + + cmd_list = '/usr/sbin/swlist -a revision -l product' + if depot: + rc, stdout, stderr = module.run_command("%s -s %s %s | grep %s" % (cmd_list, pipes.quote(depot), pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) + else: + rc, stdout, stderr = module.run_command("%s %s | grep %s" % (cmd_list, pipes.quote(name), pipes.quote(name)), use_unsafe_shell=True) + if rc == 0: + version = re.sub("\s\s+|\t" , " ", stdout).strip().split()[1] + else: + version = None + + return rc, version + +def remove_package(module, name): + """ Uninstall package if installed. """ + + cmd_remove = '/usr/sbin/swremove' + rc, stdout, stderr = module.run_command("%s %s" % (cmd_remove, name)) + + if rc == 0: + return rc, stdout + else: + return rc, stderr + +def install_package(module, depot, name): + """ Install package if not already installed """ + + cmd_install = '/usr/sbin/swinstall -x mount_all_filesystems=false' + rc, stdout, stderr = module.run_command("%s -s %s %s" % (cmd_install, depot, name)) + if rc == 0: + return rc, stdout + else: + return rc, stderr + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(aliases=['pkg'], required=True), + state = dict(choices=['present', 'absent', 'latest'], required=True), + depot = dict(default=None, required=False) + ), + supports_check_mode=True + ) + name = module.params['name'] + state = module.params['state'] + depot = module.params['depot'] + + changed = False + msg = "No changed" + rc = 0 + if ( state == 'present' or state == 'latest' ) and depot == None: + output = "depot parameter is mandatory in present or latest task" + module.fail_json(name=name, msg=output, rc=rc) + + + #Check local version + rc, version_installed = query_package(module, name) + if not rc: + installed = True + msg = "Already installed" + + else: + installed = False + + if ( state == 'present' or state == 'latest' ) and installed == False: + if module.check_mode: + module.exit_json(changed=True) + rc, output = install_package(module, depot, name) + + if not rc: + changed = True + msg = "Packaged installed" + + else: + module.fail_json(name=name, msg=output, rc=rc) + + elif state == 'latest' and installed == True: + #Check depot version + rc, version_depot = query_package(module, name, depot) + + if not rc: + if compare_package(version_installed,version_depot) == -1: + if module.check_mode: + module.exit_json(changed=True) + #Install new version + rc, output = install_package(module, depot, name) + + if not rc: + msg = "Packge upgraded, Before " + version_installed + " Now " + version_depot + changed = True + + else: + module.fail_json(name=name, msg=output, rc=rc) + + else: + output = 
"Software package not in repository " + depot + module.fail_json(name=name, msg=output, rc=rc) + + elif state == 'absent' and installed == True: + if module.check_mode: + module.exit_json(changed=True) + rc, output = remove_package(module, name) + if not rc: + changed = True + msg = "Package removed" + else: + module.fail_json(name=name, msg=output, rc=rc) + + if module.check_mode: + module.exit_json(changed=False) + + module.exit_json(changed=changed, name=name, state=state, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * + +main() + diff --git a/lib/ansible/modules/extras/packaging/urpmi b/lib/ansible/modules/extras/packaging/urpmi new file mode 100644 index 00000000000..a42ee7b87fc --- /dev/null +++ b/lib/ansible/modules/extras/packaging/urpmi @@ -0,0 +1,200 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2013, Philippe Makowski +# Written by Philippe Makowski +# Based on apt module written by Matthew Williams +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: urpmi +short_description: Urpmi manager +description: + - Manages packages with I(urpmi) (such as for Mageia or Mandriva) +version_added: "1.3.4" +options: + pkg: + description: + - name of package to install, upgrade or remove. + required: true + default: null + state: + description: + - Indicates the desired package state + required: false + default: present + choices: [ "absent", "present" ] + update_cache: + description: + - update the package database first C(urpmi.update -a). + required: false + default: no + choices: [ "yes", "no" ] + no-suggests: + description: + - Corresponds to the C(--no-suggests) option for I(urpmi). + required: false + default: yes + choices: [ "yes", "no" ] + force: + description: + - Corresponds to the C(--force) option for I(urpmi). 
+ required: false + default: yes + choices: [ "yes", "no" ] +author: Philippe Makowski +notes: [] +''' + +EXAMPLES = ''' +# install package foo +- urpmi: pkg=foo state=present +# remove package foo +- urpmi: pkg=foo state=absent +# description: remove packages foo and bar +- urpmi: pkg=foo,bar state=absent +# description: update the package database (urpmi.update -a -q) and install bar (bar will be the updated if a newer version exists) +- urpmi: name=bar, state=present, update_cache=yes +''' + + +import json +import shlex +import os +import sys + +URPMI_PATH = '/usr/sbin/urpmi' +URPME_PATH = '/usr/sbin/urpme' + +def query_package(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + cmd = "rpm -q %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc == 0: + return True + else: + return False + +def query_package_provides(module, name): + # rpm -q returns 0 if the package is installed, + # 1 if it is not installed + cmd = "rpm -q --provides %s" % (name) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + return rc == 0 + + +def update_package_db(module): + cmd = "urpmi.update -a -q" + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + if rc != 0: + module.fail_json(msg="could not update package db") + + +def remove_packages(module, packages): + + remove_c = 0 + # Using a for loop incase of error, we can report the package that failed + for package in packages: + # Query the package first, to see if we even need to remove + if not query_package(module, package): + continue + + cmd = "%s --auto %s" % (URPME_PATH, package) + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + + if rc != 0: + module.fail_json(msg="failed to remove %s" % (package)) + + remove_c += 1 + + if remove_c > 0: + + module.exit_json(changed=True, msg="removed %s package(s)" % remove_c) + + module.exit_json(changed=False, msg="package(s) already absent") + + +def install_packages(module, pkgspec, force=True, no_suggests=True): + + packages = "" + for package in pkgspec: + if not query_package_provides(module, package): + packages += "'%s' " % package + + if len(packages) != 0: + if no_suggests: + no_suggests_yes = '--no-suggests' + else: + no_suggests_yes = '' + + if force: + force_yes = '--force' + else: + force_yes = '' + + cmd = ("%s --auto %s --quiet %s %s" % (URPMI_PATH, force_yes, no_suggests_yes, packages)) + + rc, out, err = module.run_command(cmd) + + installed = True + for packages in pkgspec: + if not query_package_provides(module, package): + installed = False + + # urpmi always have 0 for exit code if --force is used + if rc or not installed: + module.fail_json(msg="'urpmi %s' failed: %s" % (packages, err)) + else: + module.exit_json(changed=True, msg="%s present(s)" % packages) + else: + module.exit_json(changed=False) + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default='installed', choices=['installed', 'removed', 'absent', 'present']), + update_cache = dict(default=False, aliases=['update-cache'], type='bool'), + force = dict(default=True, type='bool'), + no_suggests = dict(default=True, aliases=['no-suggests'], type='bool'), + package = dict(aliases=['pkg', 'name'], required=True))) + + + if not os.path.exists(URPMI_PATH): + module.fail_json(msg="cannot find urpmi, looking for %s" % (URPMI_PATH)) + + p = module.params + + force_yes = p['force'] + no_suggest_yes = p['no_suggests'] + + if p['update_cache']: + update_package_db(module) + + packages = 
p['package'].split(',') + + if p['state'] in [ 'installed', 'present' ]: + install_packages(module, packages, force_yes, no_suggest_yes) + + elif p['state'] in [ 'removed', 'absent' ]: + remove_packages(module, packages) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/packaging/zypper b/lib/ansible/modules/extras/packaging/zypper new file mode 100644 index 00000000000..73b15694800 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/zypper @@ -0,0 +1,260 @@ +#!/usr/bin/python -tt +# -*- coding: utf-8 -*- + +# (c) 2013, Patrick Callahan +# based on +# openbsd_pkg +# (c) 2013 +# Patrik Lundin +# +# yum +# (c) 2012, Red Hat, Inc +# Written by Seth Vidal +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import re + +DOCUMENTATION = ''' +--- +module: zypper +author: Patrick Callahan +version_added: "1.2" +short_description: Manage packages on SuSE and openSuSE +description: + - Manage packages on SuSE and openSuSE using the zypper and rpm tools. +options: + name: + description: + - package name or package specifier wth version C(name) or C(name-1.0). + required: true + aliases: [ 'pkg' ] + state: + description: + - C(present) will make sure the package is installed. + C(latest) will make sure the latest version of the package is installed. + C(absent) will make sure the specified package is not installed. + required: false + choices: [ present, latest, absent ] + default: "present" + disable_gpg_check: + description: + - Whether to disable to GPG signature checking of the package + signature being installed. Has an effect only if state is + I(present) or I(latest). + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] + disable_recommends: + version_added: "1.8" + description: + - Corresponds to the C(--no-recommends) option for I(zypper). Default behavior (C(yes)) modifies zypper's default behavior; C(no) does install recommended packages. + required: false + default: "yes" + choices: [ "yes", "no" ] + +notes: [] +# informational: requirements for nodes +requirements: [ zypper, rpm ] +author: Patrick Callahan +''' + +EXAMPLES = ''' +# Install "nmap" +- zypper: name=nmap state=present + +# Install apache2 with recommended packages +- zypper: name=apache2 state=present disable_recommends=no + +# Remove the "nmap" package +- zypper: name=nmap state=absent +''' + +# Function used for getting versions of currently installed packages. 
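+# For reference, the parsing below assumes the --qf format string yields one
+# "NAME VERSION-RELEASE" line per requested package, paired positionally with
+# the requested names, e.g. (illustrative output only):
+#   nmap 6.47-1.2
+#   apache2 2.4.10-19.1
+# Any line that does not match that pattern (for example rpm's
+# "package foo is not installed" message) causes the function to return None.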
+def get_current_version(m, name): + cmd = ['/bin/rpm', '-q', '--qf', '%{NAME} %{VERSION}-%{RELEASE}\n'] + cmd.extend(name) + (rc, stdout, stderr) = m.run_command(cmd) + + current_version = {} + rpmoutput_re = re.compile('^(\S+) (\S+)$') + for stdoutline, package in zip(stdout.splitlines(), name): + m = rpmoutput_re.match(stdoutline) + if m == None: + return None + rpmpackage = m.group(1) + rpmversion = m.group(2) + if package != rpmpackage: + return None + current_version[package] = rpmversion + + return current_version + +# Function used to find out if a package is currently installed. +def get_package_state(m, packages): + cmd = ['/bin/rpm', '--query', '--qf', 'package %{NAME} is installed\n'] + cmd.extend(packages) + + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + installed_state = {} + rpmoutput_re = re.compile('^package (\S+) (.*)$') + for stdoutline, name in zip(stdout.splitlines(), packages): + m = rpmoutput_re.match(stdoutline) + if m == None: + return None + package = m.group(1) + result = m.group(2) + if not name.startswith(package): + print name + ':' + package + ':' + stdoutline + '\n' + return None + if result == 'is installed': + installed_state[name] = True + else: + installed_state[name] = False + + return installed_state + +# Function used to make sure a package is present. +def package_present(m, name, installed_state, disable_gpg_check, disable_recommends): + packages = [] + for package in name: + if installed_state[package] is False: + packages.append(package) + if len(packages) != 0: + cmd = ['/usr/bin/zypper', '--non-interactive'] + # add global options before zypper command + if disable_gpg_check: + cmd.append('--no-gpg-check') + + cmd.extend(['install', '--auto-agree-with-licenses']) + # add install parameter + if disable_recommends: + cmd.append('--no-recommends') + cmd.extend(packages) + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + if rc == 0: + changed=True + else: + changed=False + else: + rc = 0 + stdout = '' + stderr = '' + changed=False + + return (rc, stdout, stderr, changed) + +# Function used to make sure a package is the latest available version. +def package_latest(m, name, installed_state, disable_gpg_check, disable_recommends): + + # first of all, make sure all the packages are installed + (rc, stdout, stderr, changed) = package_present(m, name, installed_state, disable_gpg_check) + + # if we've already made a change, we don't have to check whether a version changed + if not changed: + pre_upgrade_versions = get_current_version(m, name) + + cmd = ['/usr/bin/zypper', '--non-interactive', 'update', '--auto-agree-with-licenses'] + cmd.extend(name) + rc, stdout, stderr = m.run_command(cmd, check_rc=False) + + # if we've already made a change, we don't have to check whether a version changed + if not changed: + post_upgrade_versions = get_current_version(m, name) + if pre_upgrade_versions != post_upgrade_versions: + changed = True + + return (rc, stdout, stderr, changed) + +# Function used to make sure a package is not installed. 
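+# The removal below reduces to a single non-interactive zypper invocation over
+# whichever packages are still installed, e.g. (illustrative):
+#   /usr/bin/zypper --non-interactive remove nmap apache2
+# A non-zero exit code is passed back via rc/stderr and reported as a failure
+# in main().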
+def package_absent(m, name, installed_state): + packages = [] + for package in name: + if installed_state[package] is True: + packages.append(package) + if len(packages) != 0: + cmd = ['/usr/bin/zypper', '--non-interactive', 'remove'] + cmd.extend(packages) + rc, stdout, stderr = m.run_command(cmd) + + if rc == 0: + changed=True + else: + changed=False + else: + rc = 0 + stdout = '' + stderr = '' + changed=False + + return (rc, stdout, stderr, changed) + +# =========================================== +# Main control flow + +def main(): + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['pkg'], type='list'), + state = dict(required=False, default='present', choices=['absent', 'installed', 'latest', 'present', 'removed']), + disable_gpg_check = dict(required=False, default='no', type='bool'), + disable_recommends = dict(required=False, default='yes', type='bool'), + ), + supports_check_mode = False + ) + + + params = module.params + + name = params['name'] + state = params['state'] + disable_gpg_check = params['disable_gpg_check'] + disable_recommends = params['disable_recommends'] + + rc = 0 + stdout = '' + stderr = '' + result = {} + result['name'] = name + result['state'] = state + + # Get package state + installed_state = get_package_state(module, name) + + # Perform requested action + if state in ['installed', 'present']: + (rc, stdout, stderr, changed) = package_present(module, name, installed_state, disable_gpg_check, disable_recommends) + elif state in ['absent', 'removed']: + (rc, stdout, stderr, changed) = package_absent(module, name, installed_state) + elif state == 'latest': + (rc, stdout, stderr, changed) = package_latest(module, name, installed_state, disable_gpg_check, disable_recommends) + + if rc != 0: + if stderr: + module.fail_json(msg=stderr) + else: + module.fail_json(msg=stdout) + + result['changed'] = changed + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/packaging/zypper_repository b/lib/ansible/modules/extras/packaging/zypper_repository new file mode 100644 index 00000000000..1eb4ffdb343 --- /dev/null +++ b/lib/ansible/modules/extras/packaging/zypper_repository @@ -0,0 +1,221 @@ +#!/usr/bin/python +# encoding: utf-8 + +# (c) 2013, Matthias Vogelgesang +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + + +DOCUMENTATION = ''' +--- +module: zypper_repository +author: Matthias Vogelgesang +version_added: "1.4" +short_description: Add and remove Zypper repositories +description: + - Add or remove Zypper repositories on SUSE and openSUSE +options: + name: + required: false + default: none + description: + - A name for the repository. Not required when adding repofiles. + repo: + required: false + default: none + description: + - URI of the repository or .repo file. Required when state=present. 
+ state: + required: false + choices: [ "absent", "present" ] + default: "present" + description: + - A source string state. + description: + required: false + default: none + description: + - A description of the repository + disable_gpg_check: + description: + - Whether to disable GPG signature checking of + all packages. Has an effect only if state is + I(present). + required: false + default: "no" + choices: [ "yes", "no" ] + aliases: [] +notes: [] +requirements: [ zypper ] +''' + +EXAMPLES = ''' +# Add NVIDIA repository for graphics drivers +- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=present + +# Remove NVIDIA repository +- zypper_repository: name=nvidia-repo repo='ftp://download.nvidia.com/opensuse/12.2' state=absent + +# Add python development repository +- zypper_repository: repo=http://download.opensuse.org/repositories/devel:/languages:/python/SLE_11_SP3/devel:languages:python.repo +''' +from xml.dom.minidom import parseString as parseXML + +REPO_OPTS = ['alias', 'name', 'priority', 'enabled', 'autorefresh', 'gpgcheck'] + + +def _parse_repos(module): + """parses the output of zypper -x lr and returns a parse repo dictionary""" + cmd = ['/usr/bin/zypper', '-x', 'lr'] + repos = [] + + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + dom = parseXML(stdout) + repo_list = dom.getElementsByTagName('repo') + for repo in repo_list: + opts = {} + for o in REPO_OPTS: + opts[o] = repo.getAttribute(o) + opts['url'] = repo.getElementsByTagName('url')[0].firstChild.data + # A repo can be uniquely identified by an alias + url + repos.append(opts) + + return repos + + +def repo_exists(module, **kwargs): + + def repo_subset(realrepo, repocmp): + for k in repocmp: + if k not in realrepo: + return False + + for k, v in realrepo.items(): + if k in repocmp: + if v.rstrip("/") != repocmp[k].rstrip("/"): + return False + return True + + repos = _parse_repos(module) + + for repo in repos: + if repo_subset(repo, kwargs): + return True + return False + + +def add_repo(module, repo, alias, description, disable_gpg_check): + cmd = ['/usr/bin/zypper', 'ar', '--check', '--refresh'] + + if description: + cmd.extend(['--name', description]) + + if disable_gpg_check: + cmd.append('--no-gpgcheck') + + cmd.append(repo) + + if not repo.endswith('.repo'): + cmd.append(alias) + + rc, stdout, stderr = module.run_command(cmd, check_rc=False) + changed = rc == 0 + if rc == 0: + changed = True + elif 'already exists. 
Please use another alias' in stderr: + changed = False + else: + module.fail_json(msg=stderr if stderr else stdout) + + return changed + + +def remove_repo(module, repo, alias): + + cmd = ['/usr/bin/zypper', 'rr'] + if alias: + cmd.append(alias) + else: + cmd.append(repo) + + rc, stdout, stderr = module.run_command(cmd, check_rc=True) + changed = rc == 0 + return changed + + +def fail_if_rc_is_null(module, rc, stdout, stderr): + if rc != 0: + module.fail_json(msg=stderr if stderr else stdout) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=False), + repo=dict(required=False), + state=dict(choices=['present', 'absent'], default='present'), + description=dict(required=False), + disable_gpg_check = dict(required=False, default='no', type='bool'), + ), + supports_check_mode=False, + ) + + repo = module.params['repo'] + state = module.params['state'] + name = module.params['name'] + description = module.params['description'] + disable_gpg_check = module.params['disable_gpg_check'] + + def exit_unchanged(): + module.exit_json(changed=False, repo=repo, state=state, name=name) + + # Check run-time module parameters + if state == 'present' and not repo: + module.fail_json(msg='Module option state=present requires repo') + if state == 'absent' and not repo and not name: + module.fail_json(msg='Alias or repo parameter required when state=absent') + + if repo and repo.endswith('.repo'): + if name: + module.fail_json(msg='Incompatible option: \'name\'. Do not use name when adding repo files') + else: + if not name and state == "present": + module.fail_json(msg='Name required when adding non-repo files:') + + if repo and repo.endswith('.repo'): + exists = repo_exists(module, url=repo, alias=name) + elif repo: + exists = repo_exists(module, url=repo) + else: + exists = repo_exists(module, alias=name) + + if state == 'present': + if exists: + exit_unchanged() + + changed = add_repo(module, repo, name, description, disable_gpg_check) + elif state == 'absent': + if not exists: + exit_unchanged() + + changed = remove_repo(module, repo, name) + + module.exit_json(changed=changed, repo=repo, state=state) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/source_control/bzr b/lib/ansible/modules/extras/source_control/bzr new file mode 100644 index 00000000000..996150a39af --- /dev/null +++ b/lib/ansible/modules/extras/source_control/bzr @@ -0,0 +1,198 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, André Paramés +# Based on the Git module by Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = u''' +--- +module: bzr +author: André Paramés +version_added: "1.1" +short_description: Deploy software (or files) from bzr branches +description: + - Manage I(bzr) branches to deploy files or software. 
+options: + name: + required: true + aliases: [ 'parent' ] + description: + - SSH or HTTP protocol address of the parent branch. + dest: + required: true + description: + - Absolute path of where the branch should be cloned to. + version: + required: false + default: "head" + description: + - What version of the branch to clone. This can be the + bzr revno or revid. + force: + required: false + default: "yes" + choices: [ 'yes', 'no' ] + description: + - If C(yes), any modified files in the working + tree will be discarded. + executable: + required: false + default: null + version_added: "1.4" + description: + - Path to bzr executable to use. If not supplied, + the normal mechanism for resolving binary paths will be used. +''' + +EXAMPLES = ''' +# Example bzr checkout from Ansible Playbooks +- bzr: name=bzr+ssh://foosball.example.org/path/to/branch dest=/srv/checkout version=22 +''' + +import re + + +class Bzr(object): + def __init__(self, module, parent, dest, version, bzr_path): + self.module = module + self.parent = parent + self.dest = dest + self.version = version + self.bzr_path = bzr_path + + def _command(self, args_list, cwd=None, **kwargs): + (rc, out, err) = self.module.run_command([self.bzr_path] + args_list, cwd=cwd, **kwargs) + return (rc, out, err) + + def get_version(self): + '''samples the version of the bzr branch''' + + cmd = "%s revno" % self.bzr_path + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + revno = stdout.strip() + return revno + + def clone(self): + '''makes a new bzr branch if it does not already exist''' + dest_dirname = os.path.dirname(self.dest) + try: + os.makedirs(dest_dirname) + except: + pass + if self.version.lower() != 'head': + args_list = ["branch", "-r", self.version, self.parent, self.dest] + else: + args_list = ["branch", self.parent, self.dest] + return self._command(args_list, check_rc=True, cwd=dest_dirname) + + def has_local_mods(self): + + cmd = "%s status -S" % self.bzr_path + rc, stdout, stderr = self.module.run_command(cmd, cwd=self.dest) + lines = stdout.splitlines() + + lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines) + return len(lines) > 0 + + def reset(self, force): + ''' + Resets the index and working tree to head. + Discards any changes to tracked files in the working + tree since that commit. 
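+        In practice this is a plain "bzr revert" of the working tree; it is
+        refused when force is false and local modifications are present.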
+ ''' + if not force and self.has_local_mods(): + self.module.fail_json(msg="Local modifications exist in branch (force=no).") + return self._command(["revert"], check_rc=True, cwd=self.dest) + + def fetch(self): + '''updates branch from remote sources''' + if self.version.lower() != 'head': + (rc, out, err) = self._command(["pull", "-r", self.version], cwd=self.dest) + else: + (rc, out, err) = self._command(["pull"], cwd=self.dest) + if rc != 0: + self.module.fail_json(msg="Failed to pull") + return (rc, out, err) + + def switch_version(self): + '''once pulled, switch to a particular revno or revid''' + if self.version.lower() != 'head': + args_list = ["revert", "-r", self.version] + else: + args_list = ["revert"] + return self._command(args_list, check_rc=True, cwd=self.dest) + +# =========================================== + +def main(): + module = AnsibleModule( + argument_spec = dict( + dest=dict(required=True), + name=dict(required=True, aliases=['parent']), + version=dict(default='head'), + force=dict(default='yes', type='bool'), + executable=dict(default=None), + ) + ) + + dest = os.path.abspath(os.path.expanduser(module.params['dest'])) + parent = module.params['name'] + version = module.params['version'] + force = module.params['force'] + bzr_path = module.params['executable'] or module.get_bin_path('bzr', True) + + bzrconfig = os.path.join(dest, '.bzr', 'branch', 'branch.conf') + + rc, out, err, status = (0, None, None, None) + + bzr = Bzr(module, parent, dest, version, bzr_path) + + # if there is no bzr configuration, do a branch operation + # else pull and switch the version + before = None + local_mods = False + if not os.path.exists(bzrconfig): + (rc, out, err) = bzr.clone() + + else: + # else do a pull + local_mods = bzr.has_local_mods() + before = bzr.get_version() + (rc, out, err) = bzr.reset(force) + if rc != 0: + module.fail_json(msg=err) + (rc, out, err) = bzr.fetch() + if rc != 0: + module.fail_json(msg=err) + + # switch to version specified regardless of whether + # we cloned or pulled + (rc, out, err) = bzr.switch_version() + + # determine if we changed anything + after = bzr.get_version() + changed = False + + if before != after or local_mods: + changed = True + + module.exit_json(changed=changed, before=before, after=after) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/source_control/github_hooks b/lib/ansible/modules/extras/source_control/github_hooks new file mode 100644 index 00000000000..6a8d1ced935 --- /dev/null +++ b/lib/ansible/modules/extras/source_control/github_hooks @@ -0,0 +1,178 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Phillip Gentry +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. + +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import json +import base64 + +DOCUMENTATION = ''' +--- +module: github_hooks +short_description: Manages github service hooks. 
+description: + - Adds service hooks and removes service hooks that have an error status. +version_added: "1.4" +options: + user: + description: + - Github username. + required: true + oauthkey: + description: + - The oauth key provided by github. It can be found/generated on github under "Edit Your Profile" >> "Applications" >> "Personal Access Tokens" + required: true + repo: + description: + - "This is the API url for the repository you want to manage hooks for. It should be in the form of: https://api.github.com/repos/user:/repo:. Note this is different than the normal repo url." + required: true + hookurl: + description: + - When creating a new hook, this is the url that you want github to post to. It is only required when creating a new hook. + required: false + action: + description: + - This tells the githooks module what you want it to do. + required: true + choices: [ "create", "cleanall" ] + validate_certs: + description: + - If C(no), SSL certificates for the target repo will not be validated. This should only be used + on personally controlled sites using self-signed certificates. + required: false + default: 'yes' + choices: ['yes', 'no'] + +author: Phillip Gentry, CX Inc +''' + +EXAMPLES = ''' +# Example creating a new service hook. It ignores duplicates. +- github_hooks: action=create hookurl=http://11.111.111.111:2222 user={{ gituser }} oauthkey={{ oauthkey }} repo=https://api.github.com/repos/pcgentry/Github-Auto-Deploy + +# Cleaning all hooks for this repo that had an error on the last update. Since this works for all hooks in a repo it is probably best that this would be called from a handler. +- local_action: github_hooks action=cleanall user={{ gituser }} oauthkey={{ oauthkey }} repo={{ repo }} +''' + +def list(module, hookurl, oauthkey, repo, user): + url = "%s/hooks" % repo + auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, headers=headers) + if info['status'] != 200: + return False, '' + else: + return False, response.read() + +def clean504(module, hookurl, oauthkey, repo, user): + current_hooks = list(hookurl, oauthkey, repo, user)[1] + decoded = json.loads(current_hooks) + + for hook in decoded: + if hook['last_response']['code'] == 504: + # print "Last response was an ERROR for hook:" + # print hook['id'] + delete(module, hookurl, oauthkey, repo, user, hook['id']) + + return 0, current_hooks + +def cleanall(module, hookurl, oauthkey, repo, user): + current_hooks = list(hookurl, oauthkey, repo, user)[1] + decoded = json.loads(current_hooks) + + for hook in decoded: + if hook['last_response']['code'] != 200: + # print "Last response was an ERROR for hook:" + # print hook['id'] + delete(module, hookurl, oauthkey, repo, user, hook['id']) + + return 0, current_hooks + +def create(module, hookurl, oauthkey, repo, user): + url = "%s/hooks" % repo + values = { + "active": True, + "name": "web", + "config": { + "url": "%s" % hookurl, + "content_type": "json" + } + } + data = json.dumps(values) + auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') + headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, data=data, headers=headers) + if info['status'] != 200: + return 0, '[]' + else: + return 0, response.read() + +def delete(module, hookurl, oauthkey, repo, user, hookid): + url = "%s/hooks/%s" % (repo, hookid) + auth = base64.encodestring('%s:%s' % (user, oauthkey)).replace('\n', '') + 
headers = { + 'Authorization': 'Basic %s' % auth, + } + response, info = fetch_url(module, url, data=data, headers=headers, method='DELETE') + return response.read() + +def main(): + module = AnsibleModule( + argument_spec=dict( + action=dict(required=True), + hookurl=dict(required=False), + oauthkey=dict(required=True), + repo=dict(required=True), + user=dict(required=True), + validate_certs=dict(default='yes', type='bool'), + ) + ) + + action = module.params['action'] + hookurl = module.params['hookurl'] + oauthkey = module.params['oauthkey'] + repo = module.params['repo'] + user = module.params['user'] + + if action == "list": + (rc, out) = list(module, hookurl, oauthkey, repo, user) + + if action == "clean504": + (rc, out) = clean504(module, hookurl, oauthkey, repo, user) + + if action == "cleanall": + (rc, out) = cleanall(module, hookurl, oauthkey, repo, user) + + if action == "create": + (rc, out) = create(module, hookurl, oauthkey, repo, user) + + if rc != 0: + module.fail_json(msg="failed", result=out) + + module.exit_json(msg="success", result=out) + + +# import module snippets +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * + +main() diff --git a/lib/ansible/modules/extras/system/alternatives b/lib/ansible/modules/extras/system/alternatives new file mode 100755 index 00000000000..b80ffab944c --- /dev/null +++ b/lib/ansible/modules/extras/system/alternatives @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to manage symbolic link alternatives. +(c) 2014, Gabe Mulley + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: alternatives +short_description: Manages alternative programs for common commands +description: + - Manages symbolic links using the 'update-alternatives' tool provided on debian-like systems. + - Useful when multiple programs are installed but provide similar functionality (e.g. different editors). +version_added: "1.6" +options: + name: + description: + - The generic name of the link. + required: true + path: + description: + - The path to the real executable that the link should point to. + required: true + link: + description: + - The path to the symbolic link that should point to the real executable. 
+ required: false +requirements: [ update-alternatives ] +''' + +EXAMPLES = ''' +- name: correct java version selected + alternatives: name=java path=/usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + +- name: alternatives link created + alternatives: name=hadoop-conf link=/etc/hadoop/conf path=/etc/hadoop/conf.ansible +''' + +DEFAULT_LINK_PRIORITY = 50 + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + path = dict(required=True), + link = dict(required=False), + ) + ) + + params = module.params + name = params['name'] + path = params['path'] + link = params['link'] + + UPDATE_ALTERNATIVES = module.get_bin_path('update-alternatives',True) + + current_path = None + all_alternatives = [] + + (rc, query_output, query_error) = module.run_command( + [UPDATE_ALTERNATIVES, '--query', name] + ) + + # Gather the current setting and all alternatives from the query output. + # Query output should look something like this: + + # Name: java + # Link: /usr/bin/java + # Slaves: + # java.1.gz /usr/share/man/man1/java.1.gz + # Status: manual + # Best: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + # Value: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java + + # Alternative: /usr/lib/jvm/java-6-openjdk-amd64/jre/bin/java + # Priority: 1061 + # Slaves: + # java.1.gz /usr/lib/jvm/java-6-openjdk-amd64/jre/man/man1/java.1.gz + + # Alternative: /usr/lib/jvm/java-7-openjdk-amd64/jre/bin/java + # Priority: 1071 + # Slaves: + # java.1.gz /usr/lib/jvm/java-7-openjdk-amd64/jre/man/man1/java.1.gz + + if rc == 0: + for line in query_output.splitlines(): + split_line = line.split(':') + if len(split_line) == 2: + key = split_line[0] + value = split_line[1].strip() + if key == 'Value': + current_path = value + elif key == 'Alternative': + all_alternatives.append(value) + elif key == 'Link' and not link: + link = value + + if current_path != path: + try: + # install the requested path if necessary + if path not in all_alternatives: + module.run_command( + [UPDATE_ALTERNATIVES, '--install', link, name, path, str(DEFAULT_LINK_PRIORITY)], + check_rc=True + ) + + # select the requested path + module.run_command( + [UPDATE_ALTERNATIVES, '--set', name, path], + check_rc=True + ) + + module.exit_json(changed=True) + except subprocess.CalledProcessError, cpe: + module.fail_json(msg=str(dir(cpe))) + else: + module.exit_json(changed=False) + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/at b/lib/ansible/modules/extras/system/at new file mode 100644 index 00000000000..c63527563fd --- /dev/null +++ b/lib/ansible/modules/extras/system/at @@ -0,0 +1,200 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# (c) 2014, Richard Isaacson +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: at +short_description: Schedule the execution of a command or script file via the at command. 
+description: + - Use this module to schedule a command or script file to run once in the future. + - All jobs are executed in the 'a' queue. +version_added: "1.5" +options: + command: + description: + - A command to be executed in the future. + required: false + default: null + script_file: + description: + - An existing script file to be executed in the future. + required: false + default: null + count: + description: + - The count of units in the future to execute the command or script file. + required: true + units: + description: + - The type of units in the future to execute the command or script file. + required: true + choices: ["minutes", "hours", "days", "weeks"] + state: + description: + - The state dictates if the command or script file should be evaluated as present(added) or absent(deleted). + required: false + choices: ["present", "absent"] + default: "present" + unique: + description: + - If a matching job is present a new job will not be added. + required: false + default: false +requirements: + - at +author: Richard Isaacson +''' + +EXAMPLES = ''' +# Schedule a command to execute in 20 minutes as root. +- at: command="ls -d / > /dev/null" count=20 units="minutes" + +# Match a command to an existing job and delete the job. +- at: command="ls -d / > /dev/null" state="absent" + +# Schedule a command to execute in 20 minutes making sure it is unique in the queue. +- at: command="ls -d / > /dev/null" unique=true count=20 units="minutes" +''' + +import os +import tempfile + + +def add_job(module, result, at_cmd, count, units, command, script_file): + at_command = "%s now + %s %s -f %s" % (at_cmd, count, units, script_file) + rc, out, err = module.run_command(at_command, check_rc=True) + if command: + os.unlink(script_file) + result['changed'] = True + + +def delete_job(module, result, at_cmd, command, script_file): + for matching_job in get_matching_jobs(module, at_cmd, script_file): + at_command = "%s -d %s" % (at_cmd, matching_job) + rc, out, err = module.run_command(at_command, check_rc=True) + result['changed'] = True + if command: + os.unlink(script_file) + module.exit_json(**result) + + +def get_matching_jobs(module, at_cmd, script_file): + matching_jobs = [] + + atq_cmd = module.get_bin_path('atq', True) + + # Get list of job numbers for the user. + atq_command = "%s" % atq_cmd + rc, out, err = module.run_command(atq_command, check_rc=True) + current_jobs = out.splitlines() + if len(current_jobs) == 0: + return matching_jobs + + # Read script_file into a string. + script_file_string = open(script_file).read().strip() + + # Loop through the jobs. + # If the script text is contained in a job add job number to list. + for current_job in current_jobs: + split_current_job = current_job.split() + at_command = "%s -c %s" % (at_cmd, split_current_job[0]) + rc, out, err = module.run_command(at_command, check_rc=True) + if script_file_string in out: + matching_jobs.append(split_current_job[0]) + + # Return the list. 
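+    # (The match above is purely textual: "at -c <job>" prints the queued job,
+    #  environment preamble included, and a job is considered a match when the
+    #  temporary script's contents appear verbatim in that output.)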
+ return matching_jobs + + +def create_tempfile(command): + filed, script_file = tempfile.mkstemp(prefix='at') + fileh = os.fdopen(filed, 'w') + fileh.write(command) + fileh.close() + return script_file + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + command=dict(required=False, + type='str'), + script_file=dict(required=False, + type='str'), + count=dict(required=False, + type='int'), + units=dict(required=False, + default=None, + choices=['minutes', 'hours', 'days', 'weeks'], + type='str'), + state=dict(required=False, + default='present', + choices=['present', 'absent'], + type='str'), + unique=dict(required=False, + default=False, + type='bool') + ), + mutually_exclusive=[['command', 'script_file']], + required_one_of=[['command', 'script_file']], + supports_check_mode=False + ) + + at_cmd = module.get_bin_path('at', True) + + command = module.params['command'] + script_file = module.params['script_file'] + count = module.params['count'] + units = module.params['units'] + state = module.params['state'] + unique = module.params['unique'] + + if (state == 'present') and (not count or not units): + module.fail_json(msg="present state requires count and units") + + result = {'state': state, 'changed': False} + + # If command transform it into a script_file + if command: + script_file = create_tempfile(command) + + # if absent remove existing and return + if state == 'absent': + delete_job(module, result, at_cmd, command, script_file) + + # if unique if existing return unchanged + if unique: + if len(get_matching_jobs(module, at_cmd, script_file)) != 0: + if command: + os.unlink(script_file) + module.exit_json(**result) + + result['script_file'] = script_file + result['count'] = count + result['units'] = units + + add_job(module, result, at_cmd, count, units, command, script_file) + + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/capabilities b/lib/ansible/modules/extras/system/capabilities new file mode 100644 index 00000000000..f4a9f62c0d0 --- /dev/null +++ b/lib/ansible/modules/extras/system/capabilities @@ -0,0 +1,187 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Nate Coraor +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: capabilities +short_description: Manage Linux capabilities +description: + - This module manipulates files privileges using the Linux capabilities(7) system. +version_added: "1.6" +options: + path: + description: + - Specifies the path to the file to be managed. + required: true + default: null + capability: + description: + - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent)) + required: true + default: null + aliases: [ 'cap' ] + state: + description: + - Whether the entry should be present or absent in the file's capabilities. 
+ choices: [ "present", "absent" ] + default: present +notes: + - The capabilities system will automatically transform operators and flags + into the effective set, so (for example, cap_foo=ep will probably become + cap_foo+ep). This module does not attempt to determine the final operator + and flags to compare, so you will want to ensure that your capabilities + argument matches the final capabilities. +requirements: [] +author: Nate Coraor +''' + +EXAMPLES = ''' +# Set cap_sys_chroot+ep on /foo +- capabilities: path=/foo capability=cap_sys_chroot+ep state=present + +# Remove cap_net_bind_service from /bar +- capabilities: path=/bar capability=cap_net_bind_service state=absent +''' + + +OPS = ( '=', '-', '+' ) + +# ============================================================== + +import os +import tempfile +import re + +class CapabilitiesModule(object): + + platform = 'Linux' + distribution = None + + def __init__(self, module): + self.module = module + self.path = module.params['path'].strip() + self.capability = module.params['capability'].strip().lower() + self.state = module.params['state'] + self.getcap_cmd = module.get_bin_path('getcap', required=True) + self.setcap_cmd = module.get_bin_path('setcap', required=True) + self.capability_tup = self._parse_cap(self.capability, op_required=self.state=='present') + + self.run() + + def run(self): + + current = self.getcap(self.path) + caps = [ cap[0] for cap in current ] + + if self.state == 'present' and self.capability_tup not in current: + # need to add capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list if it's already set (but op/flags differ) + current = filter(lambda x: x[0] != self.capability_tup[0], current) + # add new cap with correct op/flags + current.append( self.capability_tup ) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + elif self.state == 'absent' and self.capability_tup[0] in caps: + # need to remove capability + if self.module.check_mode: + self.module.exit_json(changed=True, msg='capabilities changed') + else: + # remove from current cap list and then set current list + current = filter(lambda x: x[0] != self.capability_tup[0], current) + self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current)) + self.module.exit_json(changed=False, state=self.state) + + def getcap(self, path): + rval = [] + cmd = "%s -v %s" % (self.getcap_cmd, path) + rc, stdout, stderr = self.module.run_command(cmd) + # If file xattrs are set but no caps are set the output will be: + # '/foo =' + # If file xattrs are unset the output will be: + # '/foo' + # If the file does not eixst the output will be (with rc == 0...): + # '/foo (No such file or directory)' + if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1): + self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr) + if stdout.strip() != path: + caps = stdout.split(' =')[1].strip().split() + for cap in caps: + cap = cap.lower() + # getcap condenses capabilities with the same op/flags into a + # comma-separated list, so we have to parse that + if ',' in cap: + cap_group = cap.split(',') + cap_group[-1], op, flags = self._parse_cap(cap_group[-1]) + for subcap in cap_group: + rval.append( ( subcap, op, flags ) ) + else: + rval.append(self._parse_cap(cap)) + return rval + + def setcap(self, path, 
caps): + caps = ' '.join([ ''.join(cap) for cap in caps ]) + cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path) + rc, stdout, stderr = self.module.run_command(cmd) + if rc != 0: + self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr) + else: + return stdout + + def _parse_cap(self, cap, op_required=True): + opind = -1 + try: + i = 0 + while opind == -1: + opind = cap.find(OPS[i]) + i += 1 + except: + if op_required: + self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS)) + else: + return (cap, None, None) + op = cap[opind] + cap, flags = cap.split(op) + return (cap, op, flags) + +# ============================================================== +# main + +def main(): + + # defining module + module = AnsibleModule( + argument_spec = dict( + path = dict(aliases=['key'], required=True), + capability = dict(aliases=['cap'], required=True), + state = dict(default='present', choices=['present', 'absent']), + ), + supports_check_mode=True + ) + + CapabilitiesModule(module) + + sys.exit(0) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/debconf b/lib/ansible/modules/extras/system/debconf new file mode 100644 index 00000000000..7f5ea0368ca --- /dev/null +++ b/lib/ansible/modules/extras/system/debconf @@ -0,0 +1,170 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +""" +Ansible module to configure .deb packages. +(c) 2014, Brian Coca + +This file is part of Ansible + +Ansible is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +Ansible is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with Ansible. If not, see . +""" + +DOCUMENTATION = ''' +--- +module: debconf +short_description: Configure a .deb package +description: + - Configure a .deb package using debconf-set-selections. Or just query + existing selections. +version_added: "1.6" +notes: + - This module requires the command line debconf tools. + - A number of questions have to be answered (depending on the package). + Use 'debconf-show ' on any Debian or derivative with the package + installed to see questions/settings available. +requirements: [ debconf, debconf-utils ] +options: + name: + description: + - Name of package to configure. 
+ required: true + default: null + aliases: ['pkg'] + question: + description: + - A debconf configuration setting + required: false + default: null + aliases: ['setting', 'selection'] + vtype: + description: + - The type of the value supplied + required: false + default: null + choices: [string, password, boolean, select, multiselect, note, error, title, text] + aliases: [] + value: + description: + - Value to set the configuration to + required: false + default: null + aliases: ['answer'] + unseen: + description: + - Do not set 'seen' flag when pre-seeding + required: false + default: False + aliases: [] +author: Brian Coca + +''' + +EXAMPLES = ''' +# Set default locale to fr_FR.UTF-8 +debconf: name=locales question='locales/default_environment_locale' value=fr_FR.UTF-8 vtype='select' + +# set to generate locales: +debconf: name=locales question='locales/locales_to_be_generated' value='en_US.UTF-8 UTF-8, fr_FR.UTF-8 UTF-8' vtype='multiselect' + +# Accept oracle license +debconf: name='oracle-java7-installer' question='shared/accepted-oracle-license-v1-1' value='true' vtype='select' + +# Specifying package you can register/return the list of questions and current values +debconf: name='tzdata' +''' + +import pipes + +def get_selections(module, pkg): + cmd = [module.get_bin_path('debconf-show', True), pkg] + rc, out, err = module.run_command(' '.join(cmd)) + + if rc != 0: + module.fail_json(msg=err) + + selections = {} + + for line in out.splitlines(): + (key, value) = line.split(':', 1) + selections[ key.strip('*').strip() ] = value.strip() + + return selections + + +def set_selection(module, pkg, question, vtype, value, unseen): + + data = ' '.join([ question, vtype, value ]) + + setsel = module.get_bin_path('debconf-set-selections', True) + cmd = ["echo %s %s |" % (pipes.quote(pkg), pipes.quote(data)), setsel] + if unseen: + cmd.append('-u') + + return module.run_command(' '.join(cmd), use_unsafe_shell=True) + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True, aliases=['pkg'], type='str'), + question = dict(required=False, aliases=['setting', 'selection'], type='str'), + vtype = dict(required=False, type='str', choices=['string', 'password', 'boolean', 'select', 'multiselect', 'note', 'error', 'title', 'text']), + value= dict(required=False, type='str'), + unseen = dict(required=False, type='bool'), + ), + required_together = ( ['question','vtype', 'value'],), + supports_check_mode=True, + ) + + #TODO: enable passing array of options and/or debconf file from get-selections dump + pkg = module.params["name"] + question = module.params["question"] + vtype = module.params["vtype"] + value = module.params["value"] + unseen = module.params["unseen"] + + prev = get_selections(module, pkg) + diff = '' + + changed = False + msg = "" + + if question is not None: + if vtype is None or value is None: + module.fail_json(msg="when supplying a question you must supply a valid vtype and value") + + if not question in prev or prev[question] != value: + changed = True + + if changed: + if not module.check_mode: + rc, msg, e = set_selection(module, pkg, question, vtype, value, unseen) + if rc: + module.fail_json(msg=e) + + curr = { question: value } + if question in prev: + prev = {question: prev[question]} + else: + prev[question] = '' + + module.exit_json(changed=changed, msg=msg, current=curr, previous=prev) + + module.exit_json(changed=changed, msg=msg, current=prev) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git 
a/lib/ansible/modules/extras/system/facter b/lib/ansible/modules/extras/system/facter new file mode 100644 index 00000000000..a72cdc6536f --- /dev/null +++ b/lib/ansible/modules/extras/system/facter @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: facter +short_description: Runs the discovery program I(facter) on the remote system +description: + - Runs the I(facter) discovery program + (U(https://github.com/puppetlabs/facter)) on the remote system, returning + JSON data that can be useful for inventory purposes. +version_added: "0.2" +options: {} +notes: [] +requirements: [ "facter", "ruby-json" ] +author: Michael DeHaan +''' + +EXAMPLES = ''' +# Example command-line invocation +ansible www.example.net -m facter +''' + +def main(): + module = AnsibleModule( + argument_spec = dict() + ) + + cmd = ["/usr/bin/env", "facter", "--json"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + +# import module snippets +from ansible.module_utils.basic import * + +main() + diff --git a/lib/ansible/modules/extras/system/filesystem b/lib/ansible/modules/extras/system/filesystem new file mode 100644 index 00000000000..064c0d0af86 --- /dev/null +++ b/lib/ansible/modules/extras/system/filesystem @@ -0,0 +1,119 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Bulimov +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +author: Alexander Bulimov +module: filesystem +short_description: Makes file system on block device +description: + - This module creates file system. +version_added: "1.2" +options: + fstype: + description: + - File System type to be created. + required: true + dev: + description: + - Target block device. + required: true + force: + choices: [ "yes", "no" ] + default: "no" + description: + - If yes, allows to create new filesystem on devices that already has filesystem. + required: false + opts: + description: + - List of options to be passed to mkfs command. +notes: + - uses mkfs command +''' + +EXAMPLES = ''' +# Create a ext2 filesystem on /dev/sdb1. +- filesystem: fstype=ext2 dev=/dev/sdb1 + +# Create a ext4 filesystem on /dev/sdb1 and check disk blocks. 
+- filesystem: fstype=ext4 dev=/dev/sdb1 opts="-cc" +''' + +def main(): + module = AnsibleModule( + argument_spec = dict( + fstype=dict(required=True, aliases=['type']), + dev=dict(required=True, aliases=['device']), + opts=dict(), + force=dict(type='bool', default='no'), + ), + supports_check_mode=True, + ) + + dev = module.params['dev'] + fstype = module.params['fstype'] + opts = module.params['opts'] + force = module.boolean(module.params['force']) + + changed = False + + if not os.path.exists(dev): + module.fail_json(msg="Device %s not found."%dev) + + cmd = module.get_bin_path('blkid', required=True) + + rc,raw_fs,err = module.run_command("%s -c /dev/null -o value -s TYPE %s" % (cmd, dev)) + fs = raw_fs.strip() + + + if fs == fstype: + module.exit_json(changed=False) + elif fs and not force: + module.fail_json(msg="'%s' is already used as %s, use force=yes to overwrite"%(dev,fs), rc=rc, err=err) + + ### create fs + + if module.check_mode: + changed = True + else: + mkfs = module.get_bin_path('mkfs', required=True) + cmd = None + if fstype in ['ext2', 'ext3', 'ext4', 'ext4dev']: + force_flag="-F" + elif fstype in ['btrfs']: + force_flag="-f" + else: + force_flag="" + + if opts is None: + cmd = "%s -t %s %s '%s'" % (mkfs, fstype, force_flag, dev) + else: + cmd = "%s -t %s %s %s '%s'" % (mkfs, fstype, force_flag, opts, dev) + rc,_,err = module.run_command(cmd) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating filesystem %s on device '%s' failed"%(fstype,dev), rc=rc, err=err) + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/firewalld b/lib/ansible/modules/extras/system/firewalld new file mode 100644 index 00000000000..22db165aad3 --- /dev/null +++ b/lib/ansible/modules/extras/system/firewalld @@ -0,0 +1,398 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Adam Miller (maxamillion@fedoraproject.org) +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: firewalld +short_description: Manage arbitrary ports/services with firewalld +description: + - This module allows for addition or deletion of services and ports either tcp or udp in either running or permanent firewalld rules +version_added: "1.4" +options: + service: + description: + - "Name of a service to add/remove to/from firewalld - service must be listed in /etc/services" + required: false + default: null + port: + description: + - "Name of a port to add/remove to/from firewalld must be in the form PORT/PROTOCOL" + required: false + default: null + rich_rule: + description: + - "Rich rule to add/remove to/from firewalld" + required: false + default: null + zone: + description: + - 'The firewalld zone to add/remove to/from (NOTE: default zone can be configured per system but "public" is default from upstream. 
Available choices can be extended based on per-system configs, listed here are "out of the box" defaults).' + required: false + default: system-default(public) + choices: [ "work", "drop", "internal", "external", "trusted", "home", "dmz", "public", "block"] + permanent: + description: + - "Should this configuration be in the running firewalld configuration or persist across reboots" + required: true + default: true + state: + description: + - "Should this port accept(enabled) or reject(disabled) connections" + required: true + default: enabled + timeout: + description: + - "The amount of time the rule should be in effect for when non-permanent" + required: false + default: 0 +notes: + - Not tested on any debian based system +requirements: [ firewalld >= 0.2.11 ] +author: Adam Miller +''' + +EXAMPLES = ''' +- firewalld: service=https permanent=true state=enabled +- firewalld: port=8081/tcp permanent=true state=disabled +- firewalld: zone=dmz service=http permanent=true state=enabled +- firewalld: rich_rule='rule service name="ftp" audit limit value="1/m" accept' permanent=true state=enabled +''' + +import os +import re +import sys + +try: + import firewall.config + FW_VERSION = firewall.config.VERSION + + from firewall.client import FirewallClient + fw = FirewallClient() + if not fw.connected: + raise Exception('failed to connect to the firewalld daemon') +except ImportError: + print "failed=True msg='firewalld required for this module'" + sys.exit(1) +except Exception, e: + print "failed=True msg='%s'" % str(e) + sys.exit(1) + +################ +# port handling +# +def get_port_enabled(zone, port_proto): + if port_proto in fw.getPorts(zone): + return True + else: + return False + +def set_port_enabled(zone, port, protocol, timeout): + fw.addPort(zone, port, protocol, timeout) + +def set_port_disabled(zone, port, protocol): + fw.removePort(zone, port, protocol) + +def get_port_enabled_permanent(zone, port_proto): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if tuple(port_proto) in fw_settings.getPorts(): + return True + else: + return False + +def set_port_enabled_permanent(zone, port, protocol): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addPort(port, protocol) + fw_zone.update(fw_settings) + +def set_port_disabled_permanent(zone, port, protocol): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removePort(port, protocol) + fw_zone.update(fw_settings) + + +#################### +# service handling +# +def get_service_enabled(zone, service): + if service in fw.getServices(zone): + return True + else: + return False + +def set_service_enabled(zone, service, timeout): + fw.addService(zone, service, timeout) + +def set_service_disabled(zone, service): + fw.removeService(zone, service) + +def get_service_enabled_permanent(zone, service): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if service in fw_settings.getServices(): + return True + else: + return False + +def set_service_enabled_permanent(zone, service): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addService(service) + fw_zone.update(fw_settings) + +def set_service_disabled_permanent(zone, service): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeService(service) + fw_zone.update(fw_settings) + + +#################### +# rich rule handling +# +def 
get_rich_rule_enabled(zone, rule): + if rule in fw.getRichRules(zone): + return True + else: + return False + +def set_rich_rule_enabled(zone, rule, timeout): + fw.addRichRule(zone, rule, timeout) + +def set_rich_rule_disabled(zone, rule): + fw.removeRichRule(zone, rule) + +def get_rich_rule_enabled_permanent(zone, rule): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + if rule in fw_settings.getRichRules(): + return True + else: + return False + +def set_rich_rule_enabled_permanent(zone, rule): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.addRichRule(rule) + fw_zone.update(fw_settings) + +def set_rich_rule_disabled_permanent(zone, rule): + fw_zone = fw.config().getZoneByName(zone) + fw_settings = fw_zone.getSettings() + fw_settings.removeRichRule(rule) + fw_zone.update(fw_settings) + + +def main(): + + module = AnsibleModule( + argument_spec = dict( + service=dict(required=False,default=None), + port=dict(required=False,default=None), + rich_rule=dict(required=False,default=None), + zone=dict(required=False,default=None), + permanent=dict(type='bool',required=True), + state=dict(choices=['enabled', 'disabled'], required=True), + timeout=dict(type='int',required=False,default=0), + ), + supports_check_mode=True + ) + + ## Pre-run version checking + if FW_VERSION < "0.2.11": + module.fail_json(msg='unsupported version of firewalld, requires >= 2.0.11') + + ## Global Vars + changed=False + msgs = [] + service = module.params['service'] + rich_rule = module.params['rich_rule'] + + if module.params['port'] != None: + port, protocol = module.params['port'].split('/') + if protocol == None: + module.fail_json(msg='improper port format (missing protocol?)') + else: + port = None + + if module.params['zone'] != None: + zone = module.params['zone'] + else: + zone = fw.getDefaultZone() + + permanent = module.params['permanent'] + desired_state = module.params['state'] + timeout = module.params['timeout'] + + ## Check for firewalld running + try: + if fw.connected == False: + module.fail_json(msg='firewalld service must be running') + except AttributeError: + module.fail_json(msg="firewalld connection can't be established,\ + version likely too old. 
Requires firewalld >= 2.0.11") + + modification_count = 0 + if service != None: + modification_count += 1 + if port != None: + modification_count += 1 + if rich_rule != None: + modification_count += 1 + + if modification_count > 1: + module.fail_json(msg='can only operate on port, service or rich_rule at once') + + if service != None: + if permanent: + is_enabled = get_service_enabled_permanent(zone, service) + msgs.append('Permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_service_enabled_permanent(zone, service) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_service_disabled_permanent(zone, service) + changed=True + else: + is_enabled = get_service_enabled(zone, service) + msgs.append('Non-permanent operation') + + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_service_enabled(zone, service, timeout) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_service_disabled(zone, service) + changed=True + + if changed == True: + msgs.append("Changed service %s to %s" % (service, desired_state)) + + if port != None: + if permanent: + is_enabled = get_port_enabled_permanent(zone, [port, protocol]) + msgs.append('Permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_port_enabled_permanent(zone, port, protocol) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_port_disabled_permanent(zone, port, protocol) + changed=True + else: + is_enabled = get_port_enabled(zone, [port,protocol]) + msgs.append('Non-permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_port_enabled(zone, port, protocol, timeout) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_port_disabled(zone, port, protocol) + changed=True + + if changed == True: + msgs.append("Changed port %s to %s" % ("%s/%s" % (port, protocol), \ + desired_state)) + + if rich_rule != None: + if permanent: + is_enabled = get_rich_rule_enabled_permanent(zone, rich_rule) + msgs.append('Permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_rich_rule_enabled_permanent(zone, rich_rule) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_rich_rule_disabled_permanent(zone, rich_rule) + changed=True + else: + is_enabled = get_rich_rule_enabled(zone, rich_rule) + msgs.append('Non-permanent operation') + + if desired_state == "enabled": + if is_enabled == False: + if module.check_mode: + module.exit_json(changed=True) + + set_rich_rule_enabled(zone, rich_rule, timeout) + changed=True + elif desired_state == "disabled": + if is_enabled == True: + if module.check_mode: + module.exit_json(changed=True) + + set_rich_rule_disabled(zone, rich_rule) + changed=True + + if changed == True: + msgs.append("Changed rich_rule %s to %s" % (rich_rule, desired_state)) + + 
module.exit_json(changed=changed, msg=', '.join(msgs)) + + +################################################# +# import module snippets +from ansible.module_utils.basic import * + +main() + diff --git a/lib/ansible/modules/extras/system/getent b/lib/ansible/modules/extras/system/getent new file mode 100644 index 00000000000..0173618f699 --- /dev/null +++ b/lib/ansible/modules/extras/system/getent @@ -0,0 +1,143 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Brian Coca +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + + +DOCUMENTATION = ''' +--- +module: getent +short_description: a wrapper to the unix getent utility +description: + - Runs getent against one of it's various databases and returns information into + the host's facts +version_added: "1.8" +options: + database: + required: True + description: + - the name of a getent database supported by the target system (passwd, group, + hosts, etc). + key: + required: False + default: '' + description: + - key from which to return values from the specified database, otherwise the + full contents are returned. + split: + required: False + default: None + description: + - "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database" + fail_key: + required: False + default: True + description: + - If a supplied key is missing this will make the task fail if True + +notes: + - "Not all databases support enumeration, check system documentation for details" +requirements: [ ] +author: Brian Coca +''' + +EXAMPLES = ''' +# get root user info +- getent: database=passwd key=root + register: root_info + +# get all groups +- getent: database=group split=':' + register: groups + +# get all hosts, split by tab +- getent: database=hosts + register: hosts + +# get http service info, no error if missing +- getent: database=services key=http fail_key=False + register: http_info + +# get user password hash (requires sudo/root) +- getent: database=shadow key=www-data split=: + register: pw_hash + +''' + +def main(): + module = AnsibleModule( + argument_spec = dict( + database = dict(required=True), + key = dict(required=False, default=None), + split = dict(required=False, default=None), + fail_key = dict(required=False, default=True), + ), + supports_check_mode = True, + ) + + colon = [ 'passwd', 'shadow', 'group', 'gshadow' ] + + database = module.params['database'] + key = module.params.get('key') + split = module.params.get('split') + fail_key = module.params.get('fail_key') + + getent_bin = module.get_bin_path('getent', True) + + if key is not None: + cmd = [ getent_bin, database, key ] + else: + cmd = [ getent_bin, database ] + + if split is None and database in colon: + split = ':' + + try: + rc, out, err = module.run_command(cmd) + except Exception, e: + module.fail_json(msg=str(e)) + + msg = "Unexpected failure!" 
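+    # Illustrative sketch of the fact structure built below (values are only an
+    # example, not real output): for database=passwd and key=root the module
+    # would return something like
+    #   getent_passwd:
+    #     root: ['x', '0', '0', 'root', '/root', '/bin/bash']
+    # i.e. the first field of each record becomes the key and the remaining
+    # fields become the value list.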
+ dbtree = 'getent_%s' % database + results = { dbtree: {} } + + if rc == 0: + for line in out.splitlines(): + record = line.split(split) + results[dbtree][record[0]] = record[1:] + + module.exit_json(ansible_facts=results) + + elif rc == 1: + msg = "Missing arguments, or database unknown." + elif rc == 2: + msg = "One or more supplied key could not be found in the database." + if not fail_key: + results[dbtree][key] = None + module.exit_json(ansible_facts=results, msg=msg) + elif rc == 3: + msg = "Enumeration not supported on this database." + + module.fail_json(msg=msg) + +# import module snippets +from ansible.module_utils.basic import * + +main() + diff --git a/lib/ansible/modules/extras/system/kernel_blacklist b/lib/ansible/modules/extras/system/kernel_blacklist new file mode 100644 index 00000000000..6af08c0788c --- /dev/null +++ b/lib/ansible/modules/extras/system/kernel_blacklist @@ -0,0 +1,141 @@ +#!/usr/bin/python +# encoding: utf-8 -*- + +# (c) 2013, Matthias Vogelgesang +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +import os +import re + + +DOCUMENTATION = ''' +--- +module: kernel_blacklist +author: Matthias Vogelgesang +version_added: 1.4 +short_description: Blacklist kernel modules +description: + - Add or remove kernel modules from blacklist. +options: + name: + required: true + description: + - Name of kernel module to black- or whitelist. + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the module should be present in the blacklist or absent. + blacklist_file: + required: false + description: + - If specified, use this blacklist file instead of + C(/etc/modprobe.d/blacklist-ansible.conf). 
+ default: null +requirements: [] +''' + +EXAMPLES = ''' +# Blacklist the nouveau driver module +- kernel_blacklist: name=nouveau state=present +''' + + +class Blacklist(object): + def __init__(self, module, filename): + if not os.path.exists(filename): + open(filename, 'a').close() + + self.filename = filename + self.module = module + + def get_pattern(self): + return '^blacklist\s*' + self.module + '$' + + def readlines(self): + f = open(self.filename, 'r') + lines = f.readlines() + f.close() + return lines + + def module_listed(self): + lines = self.readlines() + pattern = self.get_pattern() + + for line in lines: + stripped = line.strip() + if stripped.startswith('#'): + continue + + if re.match(pattern, stripped): + return True + + return False + + def remove_module(self): + lines = self.readlines() + pattern = self.get_pattern() + + f = open(self.filename, 'w') + + for line in lines: + if not re.match(pattern, line.strip()): + f.write(line) + + f.close() + + def add_module(self): + f = open(self.filename, 'a') + f.write('blacklist %s\n' % self.module) + + +def main(): + module = AnsibleModule( + argument_spec=dict( + name=dict(required=True), + state=dict(required=False, choices=['present', 'absent'], + default='present'), + blacklist_file=dict(required=False, default=None) + ), + supports_check_mode=False, + ) + + args = dict(changed=False, failed=False, + name=module.params['name'], state=module.params['state']) + + filename = '/etc/modprobe.d/blacklist-ansible.conf' + + if module.params['blacklist_file']: + filename = module.params['blacklist_file'] + + blacklist = Blacklist(args['name'], filename) + + if blacklist.module_listed(): + if args['state'] == 'absent': + blacklist.remove_module() + args['changed'] = True + else: + if args['state'] == 'present': + blacklist.add_module() + args['changed'] = True + + module.exit_json(**args) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/locale_gen b/lib/ansible/modules/extras/system/locale_gen new file mode 100644 index 00000000000..12eab8dbc8f --- /dev/null +++ b/lib/ansible/modules/extras/system/locale_gen @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +import os +import os.path +from subprocess import Popen, PIPE, call + +DOCUMENTATION = ''' +--- +module: locale_gen +short_description: Creates of removes locales. +description: + - Manages locales by editing /etc/locale.gen and invoking locale-gen. +version_added: "1.6" +options: + name: + description: + - Name and encoding of the locale, such as "en_GB.UTF-8". + required: true + default: null + aliases: [] + state: + description: + - Whether the locale shall be present. + required: false + choices: ["present", "absent"] + default: "present" +''' + +EXAMPLES = ''' +# Ensure a locale exists. +- locale_gen: name=de_CH.UTF-8 state=present +''' + +# =========================================== +# location module specific support methods. +# + +def is_present(name): + """Checks if the given locale is currently installed.""" + output = Popen(["locale", "-a"], stdout=PIPE).communicate()[0] + return any(fix_case(name) == fix_case(line) for line in output.splitlines()) + +def fix_case(name): + """locale -a might return the encoding in either lower or upper case. 
+ Passing through this function makes them uniform for comparisons.""" + return name.replace(".utf8", ".UTF-8") + +def replace_line(existing_line, new_line): + """Replaces lines in /etc/locale.gen""" + with open("/etc/locale.gen", "r") as f: + lines = [line.replace(existing_line, new_line) for line in f] + with open("/etc/locale.gen", "w") as f: + f.write("".join(lines)) + +def apply_change(targetState, name, encoding): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + encoding -- Encoding such as UTF-8. + """ + if targetState=="present": + # Create locale. + replace_line("# "+name+" "+encoding, name+" "+encoding) + else: + # Delete locale. + replace_line(name+" "+encoding, "# "+name+" "+encoding) + + localeGenExitValue = call("locale-gen") + if localeGenExitValue!=0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) + +def apply_change_ubuntu(targetState, name, encoding): + """Create or remove locale. + + Keyword arguments: + targetState -- Desired state, either present or absent. + name -- Name including encoding such as de_CH.UTF-8. + encoding -- Encoding such as UTF-8. + """ + if targetState=="present": + # Create locale. + # Ubuntu's patched locale-gen automatically adds the new locale to /var/lib/locales/supported.d/local + localeGenExitValue = call(["locale-gen", name]) + else: + # Delete locale involves discarding the locale from /var/lib/locales/supported.d/local and regenerating all locales. + with open("/var/lib/locales/supported.d/local", "r") as f: + content = f.readlines() + with open("/var/lib/locales/supported.d/local", "w") as f: + for line in content: + if line!=(name+" "+encoding+"\n"): + f.write(line) + # Purge locales and regenerate. + # Please provide a patch if you know how to avoid regenerating the locales to keep! + localeGenExitValue = call(["locale-gen", "--purge"]) + + if localeGenExitValue!=0: + raise EnvironmentError(localeGenExitValue, "locale.gen failed to execute, it returned "+str(localeGenExitValue)) + +# ============================================================== +# main + +def main(): + + module = AnsibleModule( + argument_spec = dict( + name = dict(required=True), + state = dict(choices=['present','absent'], required=True), + ), + supports_check_mode=True + ) + + name = module.params['name'] + if not "." in name: + module.fail_json(msg="Locale does not match pattern. Did you specify the encoding?") + state = module.params['state'] + + if not os.path.exists("/etc/locale.gen"): + if os.path.exists("/var/lib/locales/supported.d/local"): + # Ubuntu created its own system to manage locales. + ubuntuMode = True + else: + module.fail_json(msg="/etc/locale.gen and /var/lib/locales/supported.d/local are missing. Is the package “locales” installed?") + else: + # We found the common way to manage locales. 
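+        # (That is, locales are enabled by uncommenting their lines in
+        # /etc/locale.gen and then running locale-gen, as apply_change() does.)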
+ ubuntuMode = False + + prev_state = "present" if is_present(name) else "absent" + changed = (prev_state!=state) + + if module.check_mode: + module.exit_json(changed=changed) + else: + encoding = name.split(".")[1] + if changed: + try: + if ubuntuMode==False: + apply_change(state, name, encoding) + else: + apply_change_ubuntu(state, name, encoding) + except EnvironmentError as e: + module.fail_json(msg=e.strerror, exitValue=e.errno) + + module.exit_json(name=name, changed=changed, msg="OK") + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/system/lvg b/lib/ansible/modules/extras/system/lvg new file mode 100644 index 00000000000..b7a86a27208 --- /dev/null +++ b/lib/ansible/modules/extras/system/lvg @@ -0,0 +1,253 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Alexander Bulimov +# based on lvol module by Jeroen Hoekx +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +author: Alexander Bulimov +module: lvg +short_description: Configure LVM volume groups +description: + - This module creates, removes or resizes volume groups. +version_added: "1.1" +options: + vg: + description: + - The name of the volume group. + required: true + pvs: + description: + - List of comma-separated devices to use as physical devices in this volume group. Required when creating or resizing volume group. + required: false + pesize: + description: + - The size of the physical extent in megabytes. Must be a power of 2. + default: 4 + required: false + vg_options: + description: + - Additional options to pass to C(vgcreate) when creating the volume group. + default: null + required: false + version_added: "1.6" + state: + choices: [ "present", "absent" ] + default: present + description: + - Control if the volume group exists. + required: false + force: + choices: [ "yes", "no" ] + default: "no" + description: + - If yes, allows to remove volume group with logical volumes. + required: false +notes: + - module does not modify PE size for already present volume group +''' + +EXAMPLES = ''' +# Create a volume group on top of /dev/sda1 with physical extent size = 32MB. +- lvg: vg=vg.services pvs=/dev/sda1 pesize=32 + +# Create or resize a volume group on top of /dev/sdb1 and /dev/sdc5. +# If, for example, we already have VG vg.services on top of /dev/sdb1, +# this VG will be extended by /dev/sdc5. Or if vg.services was created on +# top of /dev/sda5, we first extend it with /dev/sdb1 and /dev/sdc5, +# and then reduce by /dev/sda5. +- lvg: vg=vg.services pvs=/dev/sdb1,/dev/sdc5 + +# Remove a volume group with name vg.services. 
+- lvg: vg=vg.services state=absent +''' + +def parse_vgs(data): + vgs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + vgs.append({ + 'name': parts[0], + 'pv_count': int(parts[1]), + 'lv_count': int(parts[2]), + }) + return vgs + +def find_mapper_device_name(module, dm_device): + dmsetup_cmd = module.get_bin_path('dmsetup', True) + mapper_prefix = '/dev/mapper/' + rc, dm_name, err = module.run_command("%s info -C --noheadings -o name %s" % (dmsetup_cmd, dm_device)) + if rc != 0: + module.fail_json(msg="Failed executing dmsetup command.", rc=rc, err=err) + mapper_device = mapper_prefix + dm_name.rstrip() + return mapper_device + +def parse_pvs(module, data): + pvs = [] + dm_prefix = '/dev/dm-' + for line in data.splitlines(): + parts = line.strip().split(';') + if parts[0].startswith(dm_prefix): + parts[0] = find_mapper_device_name(module, parts[0]) + pvs.append({ + 'name': parts[0], + 'vg_name': parts[1], + }) + return pvs + +def main(): + module = AnsibleModule( + argument_spec = dict( + vg=dict(required=True), + pvs=dict(type='list'), + pesize=dict(type='int', default=4), + vg_options=dict(default=''), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default='no'), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + state = module.params['state'] + force = module.boolean(module.params['force']) + pesize = module.params['pesize'] + vgoptions = module.params['vg_options'].split() + + if module.params['pvs']: + dev_string = ' '.join(module.params['pvs']) + dev_list = module.params['pvs'] + elif state == 'present': + module.fail_json(msg="No physical volumes given.") + + + + if state=='present': + ### check given devices + for test_dev in dev_list: + if not os.path.exists(test_dev): + module.fail_json(msg="Device %s not found."%test_dev) + + ### get pv list + pvs_cmd = module.get_bin_path('pvs', True) + rc,current_pvs,err = module.run_command("%s --noheadings -o pv_name,vg_name --separator ';'" % pvs_cmd) + if rc != 0: + module.fail_json(msg="Failed executing pvs command.",rc=rc, err=err) + + ### check pv for devices + pvs = parse_pvs(module, current_pvs) + used_pvs = [ pv for pv in pvs if pv['name'] in dev_list and pv['vg_name'] and pv['vg_name'] != vg ] + if used_pvs: + module.fail_json(msg="Device %s is already in %s volume group."%(used_pvs[0]['name'],used_pvs[0]['vg_name'])) + + vgs_cmd = module.get_bin_path('vgs', True) + rc,current_vgs,err = module.run_command("%s --noheadings -o vg_name,pv_count,lv_count --separator ';'" % vgs_cmd) + + if rc != 0: + module.fail_json(msg="Failed executing vgs command.",rc=rc, err=err) + + changed = False + + vgs = parse_vgs(current_vgs) + + for test_vg in vgs: + if test_vg['name'] == vg: + this_vg = test_vg + break + else: + this_vg = None + + if this_vg is None: + if state == 'present': + ### create VG + if module.check_mode: + changed = True + else: + ### create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in dev_list: + rc,_,err = module.run_command("%s %s" % (pvcreate_cmd,current_dev)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed" % current_dev, rc=rc, err=err) + vgcreate_cmd = module.get_bin_path('vgcreate') + rc,_,err = module.run_command([vgcreate_cmd] + vgoptions + ['-s', str(pesize), vg, dev_string]) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating volume group '%s' failed"%vg, rc=rc, err=err) + else: + if state == 'absent': + if module.check_mode: + 
module.exit_json(changed=True) + else: + if this_vg['lv_count'] == 0 or force: + ### remove VG + vgremove_cmd = module.get_bin_path('vgremove', True) + rc,_,err = module.run_command("%s --force %s" % (vgremove_cmd, vg)) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove volume group %s"%(vg),rc=rc, err=err) + else: + module.fail_json(msg="Refuse to remove non-empty volume group %s without force=yes"%(vg)) + + ### resize VG + current_devs = [ pv['name'] for pv in pvs if pv['vg_name'] == vg ] + devs_to_remove = list(set(current_devs) - set(dev_list)) + devs_to_add = list(set(dev_list) - set(current_devs)) + + if devs_to_add or devs_to_remove: + if module.check_mode: + changed = True + else: + if devs_to_add: + devs_to_add_string = ' '.join(devs_to_add) + ### create PV + pvcreate_cmd = module.get_bin_path('pvcreate', True) + for current_dev in devs_to_add: + rc,_,err = module.run_command("%s %s" % (pvcreate_cmd, current_dev)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating physical volume '%s' failed"%current_dev, rc=rc, err=err) + ### add PV to our VG + vgextend_cmd = module.get_bin_path('vgextend', True) + rc,_,err = module.run_command("%s %s %s" % (vgextend_cmd, vg, devs_to_add_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to extend %s by %s."%(vg, devs_to_add_string),rc=rc,err=err) + + ### remove some PV from our VG + if devs_to_remove: + devs_to_remove_string = ' '.join(devs_to_remove) + vgreduce_cmd = module.get_bin_path('vgreduce', True) + rc,_,err = module.run_command("%s --force %s %s" % (vgreduce_cmd, vg, devs_to_remove_string)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Unable to reduce %s by %s."%(vg, devs_to_remove_string),rc=rc,err=err) + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/lvol b/lib/ansible/modules/extras/system/lvol new file mode 100644 index 00000000000..96f1b846e27 --- /dev/null +++ b/lib/ansible/modules/extras/system/lvol @@ -0,0 +1,235 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jeroen Hoekx , Alexander Bulimov +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +author: Jeroen Hoekx +module: lvol +short_description: Configure LVM logical volumes +description: + - This module creates, removes or resizes logical volumes. +version_added: "1.1" +options: + vg: + description: + - The volume group this logical volume is part of. + required: true + lv: + description: + - The name of the logical volume. + required: true + size: + description: + - The size of the logical volume, according to lvcreate(8) --size, by + default in megabytes or optionally with one of [bBsSkKmMgGtTpPeE] units; or + according to lvcreate(8) --extents as a percentage of [VG|PVS|FREE]; + resizing is not supported with percentages. 
+ state: + choices: [ "present", "absent" ] + default: present + description: + - Control if the logical volume exists. + required: false + force: + version_added: "1.5" + choices: [ "yes", "no" ] + default: "no" + description: + - Shrink or remove operations of volumes requires this switch. Ensures that + that filesystems get never corrupted/destroyed by mistake. + required: false +notes: + - Filesystems on top of the volume are not resized. +''' + +EXAMPLES = ''' +# Create a logical volume of 512m. +- lvol: vg=firefly lv=test size=512 + +# Create a logical volume of 512g. +- lvol: vg=firefly lv=test size=512g + +# Create a logical volume the size of all remaining space in the volume group +- lvol: vg=firefly lv=test size=100%FREE + +# Extend the logical volume to 1024m. +- lvol: vg=firefly lv=test size=1024 + +# Reduce the logical volume to 512m +- lvol: vg=firefly lv=test size=512 force=yes + +# Remove the logical volume. +- lvol: vg=firefly lv=test state=absent force=yes +''' + +import re + +decimal_point = re.compile(r"(\.|,)") + + +def parse_lvs(data): + lvs = [] + for line in data.splitlines(): + parts = line.strip().split(';') + lvs.append({ + 'name': parts[0], + 'size': int(decimal_point.split(parts[1])[0]), + }) + return lvs + + +def main(): + module = AnsibleModule( + argument_spec=dict( + vg=dict(required=True), + lv=dict(required=True), + size=dict(), + state=dict(choices=["absent", "present"], default='present'), + force=dict(type='bool', default='no'), + ), + supports_check_mode=True, + ) + + vg = module.params['vg'] + lv = module.params['lv'] + size = module.params['size'] + state = module.params['state'] + force = module.boolean(module.params['force']) + size_opt = 'L' + size_unit = 'm' + + if size: + # LVCREATE(8) -l --extents option with percentage + if '%' in size: + size_parts = size.split('%', 1) + size_percent = int(size_parts[0]) + if size_percent > 100: + module.fail_json(msg="Size percentage cannot be larger than 100%") + size_whole = size_parts[1] + if size_whole == 'ORIGIN': + module.fail_json(msg="Snapshot Volumes are not supported") + elif size_whole not in ['VG', 'PVS', 'FREE']: + module.fail_json(msg="Specify extents as a percentage of VG|PVS|FREE") + size_opt = 'l' + size_unit = '' + + # LVCREATE(8) -L --size option unit + elif size[-1].isalpha(): + if size[-1] in 'bBsSkKmMgGtTpPeE': + size_unit = size[-1] + if size[0:-1].isdigit(): + size = int(size[0:-1]) + else: + module.fail_json(msg="Bad size specification for unit %s" % size_unit) + size_opt = 'L' + else: + module.fail_json(msg="Size unit should be one of [bBsSkKmMgGtTpPeE]") + # when no unit, megabytes by default + elif size.isdigit(): + size = int(size) + else: + module.fail_json(msg="Bad size specification") + + if size_opt == 'l': + unit = 'm' + else: + unit = size_unit + + rc, current_lvs, err = module.run_command( + "lvs --noheadings -o lv_name,size --units %s --separator ';' %s" % (unit, vg)) + + if rc != 0: + if state == 'absent': + module.exit_json(changed=False, stdout="Volume group %s does not exist." % vg, stderr=False) + else: + module.fail_json(msg="Volume group %s does not exist." 
% vg, rc=rc, err=err) + + changed = False + + lvs = parse_lvs(current_lvs) + + for test_lv in lvs: + if test_lv['name'] == lv: + this_lv = test_lv + break + else: + this_lv = None + + if state == 'present' and not size: + if this_lv is None: + module.fail_json(msg="No size given.") + else: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + + msg = '' + if this_lv is None: + if state == 'present': + ### create LV + if module.check_mode: + changed = True + else: + rc, _, err = module.run_command("lvcreate -n %s -%s %s%s %s" % (lv, size_opt, size, size_unit, vg)) + if rc == 0: + changed = True + else: + module.fail_json(msg="Creating logical volume '%s' failed" % lv, rc=rc, err=err) + else: + if state == 'absent': + ### remove LV + if module.check_mode: + module.exit_json(changed=True) + if not force: + module.fail_json(msg="Sorry, no removal of logical volume %s without force=yes." % (this_lv['name'])) + rc, _, err = module.run_command("lvremove --force %s/%s" % (vg, this_lv['name'])) + if rc == 0: + module.exit_json(changed=True) + else: + module.fail_json(msg="Failed to remove logical volume %s" % (lv), rc=rc, err=err) + + elif size_opt == 'l': + module.exit_json(changed=False, msg="Resizing extents with percentage not supported.") + else: + ### resize LV + tool = None + if size > this_lv['size']: + tool = 'lvextend' + elif size < this_lv['size']: + if not force: + module.fail_json(msg="Sorry, no shrinking of %s without force=yes." % (this_lv['name'])) + tool = 'lvreduce --force' + + if tool: + if module.check_mode: + changed = True + else: + rc, _, err = module.run_command("%s -%s %s%s %s/%s" % (tool, size_opt, size, size_unit, vg, this_lv['name'])) + if rc == 0: + changed = True + elif "matches existing size" in err: + module.exit_json(changed=False, vg=vg, lv=this_lv['name'], size=this_lv['size']) + else: + module.fail_json(msg="Unable to resize %s to %s%s" % (lv, size, size_unit), rc=rc, err=err) + + module.exit_json(changed=changed, msg=msg) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/system/modprobe b/lib/ansible/modules/extras/system/modprobe new file mode 100644 index 00000000000..50c8f72fb2a --- /dev/null +++ b/lib/ansible/modules/extras/system/modprobe @@ -0,0 +1,115 @@ +#!/usr/bin/python +#coding: utf-8 -*- + +# (c) 2013, David Stygstra +# +# This file is part of Ansible +# +# This module is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This software is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this software. If not, see . + + +DOCUMENTATION = ''' +--- +module: modprobe +short_description: Add or remove kernel modules +requirements: [] +version_added: 1.4 +author: David Stygstra, Julien Dauphant, Matt Jeffery +description: + - Add or remove kernel modules. +options: + name: + required: true + description: + - Name of kernel module to manage. + state: + required: false + default: "present" + choices: [ present, absent ] + description: + - Whether the module should be present or absent. 
+ params: + required: false + default: "" + version_added: "1.6" + description: + - Modules parameters. +''' + +EXAMPLES = ''' +# Add the 802.1q module +- modprobe: name=8021q state=present +# Add the dummy module +- modprobe: name=dummy state=present params="numdummies=2" +''' + +def main(): + module = AnsibleModule( + argument_spec={ + 'name': {'required': True}, + 'state': {'default': 'present', 'choices': ['present', 'absent']}, + 'params': {'default': ''}, + }, + supports_check_mode=True, + ) + args = { + 'changed': False, + 'failed': False, + 'name': module.params['name'], + 'state': module.params['state'], + 'params': module.params['params'], + } + + # Check if module is present + try: + modules = open('/proc/modules') + present = False + module_name = args['name'].replace('-', '_') + ' ' + for line in modules: + if line.startswith(module_name): + present = True + break + modules.close() + except IOError, e: + module.fail_json(msg=str(e), **args) + + # Check only; don't modify + if module.check_mode: + if args['state'] == 'present' and not present: + changed = True + elif args['state'] == 'absent' and present: + changed = True + else: + changed = False + module.exit_json(changed=changed) + + # Add/remove module as needed + if args['state'] == 'present': + if not present: + rc, _, err = module.run_command(['modprobe', args['name'], args['params']]) + if rc != 0: + module.fail_json(msg=err, **args) + args['changed'] = True + elif args['state'] == 'absent': + if present: + rc, _, err = module.run_command(['rmmod', args['name']]) + if rc != 0: + module.fail_json(msg=err, **args) + args['changed'] = True + + module.exit_json(**args) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/system/ohai b/lib/ansible/modules/extras/system/ohai new file mode 100644 index 00000000000..b50abc9db03 --- /dev/null +++ b/lib/ansible/modules/extras/system/ohai @@ -0,0 +1,56 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2012, Michael DeHaan +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: ohai +short_description: Returns inventory data from I(Ohai) +description: + - Similar to the M(facter) module, this runs the I(Ohai) discovery program + (U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and + returns JSON inventory data. + I(Ohai) data is a bit more verbose and nested than I(facter). 
+version_added: "0.6" +options: {} +notes: [] +requirements: [ "ohai" ] +author: Michael DeHaan +''' + +EXAMPLES = ''' +# Retrieve (ohai) data from all Web servers and store in one-file per host +ansible webservers -m ohai --tree=/tmp/ohaidata +''' + +def main(): + module = AnsibleModule( + argument_spec = dict() + ) + cmd = ["/usr/bin/env", "ohai"] + rc, out, err = module.run_command(cmd, check_rc=True) + module.exit_json(**json.loads(out)) + +# import module snippets +from ansible.module_utils.basic import * + +main() + + diff --git a/lib/ansible/modules/extras/system/open_iscsi b/lib/ansible/modules/extras/system/open_iscsi new file mode 100644 index 00000000000..c661a723d77 --- /dev/null +++ b/lib/ansible/modules/extras/system/open_iscsi @@ -0,0 +1,379 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Serge van Ginderachter +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: open_iscsi +author: Serge van Ginderachter +version_added: "1.4" +short_description: Manage iscsi targets with open-iscsi +description: + - Discover targets on given portal, (dis)connect targets, mark targets to + manually or auto start, return device nodes of connected targets. +requirements: + - open_iscsi library and tools (iscsiadm) +options: + portal: + required: false + aliases: [ip] + description: + - the ip address of the iscsi target + port: + required: false + default: 3260 + description: + - the port on which the iscsi target process listens + target: + required: false + aliases: [name, targetname] + description: + - the iscsi target name + login: + required: false + choices: [true, false] + description: + - whether the target node should be connected + node_auth: + required: false + default: CHAP + description: + - discovery.sendtargets.auth.authmethod + node_user: + required: false + description: + - discovery.sendtargets.auth.username + node_pass: + required: false + description: + - discovery.sendtargets.auth.password + auto_node_startup: + aliases: [automatic] + required: false + choices: [true, false] + description: + - whether the target node should be automatically connected at startup + discover: + required: false + choices: [true, false] + description: + - whether the list of target nodes on the portal should be + (re)discovered and added to the persistent iscsi database. + Keep in mind that iscsiadm discovery resets configurtion, like node.startup + to manual, hence combined with auto_node_startup=yes will allways return + a changed state. 
+ show_nodes: + required: false + choices: [true, false] + description: + - whether the list of nodes in the persistent iscsi database should be + returned by the module + +examples: + - description: perform a discovery on 10.1.2.3 and show available target + nodes + code: > + open_iscsi: show_nodes=yes discover=yes portal=10.1.2.3 + - description: discover targets on portal and login to the one available + (only works if exactly one target is exported to the initiator) + code: > + open_iscsi: portal={{iscsi_target}} login=yes discover=yes + - description: connect to the named target, after updating the local + persistent database (cache) + code: > + open_iscsi: login=yes target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d + - description: discconnect from the cached named target + code: > + open_iscsi: login=no target=iqn.1986-03.com.sun:02:f8c1f9e0-c3ec-ec84-c9c9-8bfb0cd5de3d" +''' + +import glob +import time + +ISCSIADM = 'iscsiadm' + +def compare_nodelists(l1, l2): + + l1.sort() + l2.sort() + return l1 == l2 + + +def iscsi_get_cached_nodes(module, portal=None): + + cmd = '%s --mode node' % iscsiadm_cmd + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + lines = out.splitlines() + nodes = [] + for line in lines: + # line format is "ip:port,target_portal_group_tag targetname" + parts = line.split() + if len(parts) > 2: + module.fail_json(msg='error parsing output', cmd=cmd) + target = parts[1] + parts = parts[0].split(':') + target_portal = parts[0] + + if portal is None or portal == target_portal: + nodes.append(target) + + # older versions of scsiadm don't have nice return codes + # for newer versions see iscsiadm(8); also usr/iscsiadm.c for details + # err can contain [N|n]o records... + elif rc == 21 or (rc == 255 and "o records found" in err): + nodes = [] + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + return nodes + + +def iscsi_discover(module, portal, port): + + cmd = '%s --mode discovery --type sendtargets --portal %s:%s' % (iscsiadm_cmd, portal, port) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_loggedon(module, target): + + cmd = '%s --mode session' % iscsiadm_cmd + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + return target in out + elif rc == 21: + return False + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_login(module, target): + + node_auth = module.params['node_auth'] + node_user = module.params['node_user'] + node_pass = module.params['node_pass'] + + if node_user: + params = [('node.session.auth.authmethod', node_auth), + ('node.session.auth.username', node_user), + ('node.session.auth.password', node_pass)] + for (name, value) in params: + cmd = '%s --mode node --targetname %s --op=update --name %s --value %s' % (iscsiadm_cmd, target, name, value) + (rc, out, err) = module.run_command(cmd) + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + cmd = '%s --mode node --targetname %s --login' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_logout(module, target): + + cmd = '%s --mode node --targetname %s --logout' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_device_node(module, target): + + # if anyone know a better way to find out which devicenodes get created for + # a given target... 
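+    # Approach used below: udev creates one /dev/disk/by-path/ symlink per LUN
+    # (and per partition) with the target IQN embedded in the link name, so we
+    # glob for links containing the target name, skip the "-part" partition
+    # links, resolve each remaining link to its real /dev node and
+    # de-duplicate (multipath setups can expose several links to one disk).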
+ + devices = glob.glob('/dev/disk/by-path/*%s*' % target) + if len(devices) == 0: + return None + else: + devdisks = [] + for dev in devices: + # exclude partitions + if "-part" not in dev: + devdisk = os.path.realpath(dev) + # only add once (multi-path?) + if devdisk not in devdisks: + devdisks.append(devdisk) + return devdisks + + +def target_isauto(module, target): + + cmd = '%s --mode node --targetname %s' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc == 0: + lines = out.splitlines() + for line in lines: + if 'node.startup' in line: + return 'automatic' in line + return False + else: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_setauto(module, target): + + cmd = '%s --mode node --targetname %s --op=update --name node.startup --value automatic' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def target_setmanual(module, target): + + cmd = '%s --mode node --targetname %s --op=update --name node.startup --value manual' % (iscsiadm_cmd, target) + (rc, out, err) = module.run_command(cmd) + + if rc > 0: + module.fail_json(cmd=cmd, rc=rc, msg=err) + + +def main(): + + # load ansible module object + module = AnsibleModule( + argument_spec = dict( + + # target + portal = dict(required=False, aliases=['ip']), + port = dict(required=False, default=3260), + target = dict(required=False, aliases=['name', 'targetname']), + node_auth = dict(required=False, default='CHAP'), + node_user = dict(required=False), + node_pass = dict(required=False), + + # actions + login = dict(type='bool', aliases=['state']), + auto_node_startup = dict(type='bool', aliases=['automatic']), + discover = dict(type='bool', default=False), + show_nodes = dict(type='bool', default=False) + ), + + required_together=[['discover_user', 'discover_pass'], + ['node_user', 'node_pass']], + supports_check_mode=True + ) + + global iscsiadm_cmd + iscsiadm_cmd = module.get_bin_path('iscsiadm', required=True) + + # parameters + portal = module.params['portal'] + target = module.params['target'] + port = module.params['port'] + login = module.params['login'] + automatic = module.params['auto_node_startup'] + discover = module.params['discover'] + show_nodes = module.params['show_nodes'] + + check = module.check_mode + + cached = iscsi_get_cached_nodes(module, portal) + + # return json dict + result = {} + result['changed'] = False + + if discover: + if portal is None: + module.fail_json(msg = "Need to specify at least the portal (ip) to discover") + elif check: + nodes = cached + else: + iscsi_discover(module, portal, port) + nodes = iscsi_get_cached_nodes(module, portal) + if not compare_nodelists(cached, nodes): + result['changed'] |= True + result['cache_updated'] = True + else: + nodes = cached + + if login is not None or automatic is not None: + if target is None: + if len(nodes) > 1: + module.fail_json(msg = "Need to specify a target") + else: + target = nodes[0] + else: + # check given target is in cache + check_target = False + for node in nodes: + if node == target: + check_target = True + break + if not check_target: + module.fail_json(msg = "Specified target not found") + + if show_nodes: + result['nodes'] = nodes + + if login is not None: + loggedon = target_loggedon(module,target) + if (login and loggedon) or (not login and not loggedon): + result['changed'] |= False + if login: + result['devicenodes'] = target_device_node(module,target) + elif not check: + if login: + target_login(module, target) 
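+                # target_login() exits the module via fail_json() on any
+                # iscsiadm error, so reaching this point means the session
+                # is established.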
+ # give udev some time + time.sleep(1) + result['devicenodes'] = target_device_node(module,target) + else: + target_logout(module, target) + result['changed'] |= True + result['connection_changed'] = True + else: + result['changed'] |= True + result['connection_changed'] = True + + if automatic is not None: + isauto = target_isauto(module, target) + if (automatic and isauto) or (not automatic and not isauto): + result['changed'] |= False + result['automatic_changed'] = False + elif not check: + if automatic: + target_setauto(module, target) + else: + target_setmanual(module, target) + result['changed'] |= True + result['automatic_changed'] = True + else: + result['changed'] |= True + result['automatic_changed'] = True + + module.exit_json(**result) + + + +# import module snippets +from ansible.module_utils.basic import * + +main() + diff --git a/lib/ansible/modules/extras/system/ufw b/lib/ansible/modules/extras/system/ufw new file mode 100644 index 00000000000..e917a3bc749 --- /dev/null +++ b/lib/ansible/modules/extras/system/ufw @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Ahti Kitsik +# (c) 2014, Jarno Keskikangas +# (c) 2013, Aleksey Ovcharenko +# (c) 2013, James Martin +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = ''' +--- +module: ufw +short_description: Manage firewall with UFW +description: + - Manage firewall with UFW. +version_added: 1.6 +author: Aleksey Ovcharenko, Jarno Keskikangas, Ahti Kitsik +notes: + - See C(man ufw) for more examples. +requirements: + - C(ufw) package +options: + state: + description: + - C(enabled) reloads firewall and enables firewall on boot. + - C(disabled) unloads firewall and disables firewall on boot. + - C(reloaded) reloads firewall. + - C(reset) disables and resets firewall to installation defaults. + required: false + choices: ['enabled', 'disabled', 'reloaded', 'reset'] + policy: + description: + - Change the default policy for incoming or outgoing traffic. + required: false + alias: default + choices: ['allow', 'deny', 'reject'] + direction: + description: + - Select direction for a rule or default policy command. + required: false + choices: ['in', 'out', 'incoming', 'outgoing'] + logging: + description: + - Toggles logging. Logged packets use the LOG_KERN syslog facility. + choices: ['on', 'off', 'low', 'medium', 'high', 'full'] + required: false + insert: + description: + - Insert the corresponding rule as rule number NUM + required: false + rule: + description: + - Add firewall rule + required: false + choices: ['allow', 'deny', 'reject', 'limit'] + log: + description: + - Log new connections matched to this rule + required: false + choices: ['yes', 'no'] + from_ip: + description: + - Source IP address. + required: false + aliases: ['from', 'src'] + default: 'any' + from_port: + description: + - Source port. + required: false + to_ip: + description: + - Destination IP address. 
+ required: false + aliases: ['to', 'dest'] + default: 'any' + to_port: + description: + - Destination port. + required: false + aliases: ['port'] + proto: + description: + - TCP/IP protocol. + choices: ['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah'] + required: false + name: + description: + - Use profile located in C(/etc/ufw/applications.d) + required: false + aliases: ['app'] + delete: + description: + - Delete rule. + required: false + choices: ['yes', 'no'] + interface: + description: + - Specify interface for rule. + required: false + aliases: ['if'] +''' + +EXAMPLES = ''' +# Allow everything and enable UFW +ufw: state=enabled policy=allow + +# Set logging +ufw: logging=on + +# Sometimes it is desirable to let the sender know when traffic is +# being denied, rather than simply ignoring it. In these cases, use +# reject instead of deny. In addition, log rejected connections: +ufw: rule=reject port=auth log=yes + +# ufw supports connection rate limiting, which is useful for protecting +# against brute-force login attacks. ufw will deny connections if an IP +# address has attempted to initiate 6 or more connections in the last +# 30 seconds. See http://www.debian-administration.org/articles/187 +# for details. Typical usage is: +ufw: rule=limit port=ssh proto=tcp + +# Allow OpenSSH +ufw: rule=allow name=OpenSSH + +# Delete OpenSSH rule +ufw: rule=allow name=OpenSSH delete=yes + +# Deny all access to port 53: +ufw: rule=deny port=53 + +# Allow all access to tcp port 80: +ufw: rule=allow port=80 proto=tcp + +# Allow all access from RFC1918 networks to this host: +ufw: rule=allow src={{ item }} +with_items: +- 10.0.0.0/8 +- 172.16.0.0/12 +- 192.168.0.0/16 + +# Deny access to udp port 514 from host 1.2.3.4: +ufw: rule=deny proto=udp src=1.2.3.4 port=514 + +# Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469 +ufw: rule=allow interface=eth0 direction=in proto=udp src=1.2.3.5 from_port=5469 dest=1.2.3.4 to_port=5469 + +# Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host. +# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work. 
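+# (specifically, IPV6=yes in /etc/default/ufw)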
+ufw: rule=deny proto=tcp src=2001:db8::/32 port=25 +''' + +from operator import itemgetter + + +def main(): + module = AnsibleModule( + argument_spec = dict( + state = dict(default=None, choices=['enabled', 'disabled', 'reloaded', 'reset']), + default = dict(default=None, aliases=['policy'], choices=['allow', 'deny', 'reject']), + logging = dict(default=None, choices=['on', 'off', 'low', 'medium', 'high', 'full']), + direction = dict(default=None, choices=['in', 'incoming', 'out', 'outgoing']), + delete = dict(default=False, type='bool'), + insert = dict(default=None), + rule = dict(default=None, choices=['allow', 'deny', 'reject', 'limit']), + interface = dict(default=None, aliases=['if']), + log = dict(default=False, type='bool'), + from_ip = dict(default='any', aliases=['src', 'from']), + from_port = dict(default=None), + to_ip = dict(default='any', aliases=['dest', 'to']), + to_port = dict(default=None, aliases=['port']), + proto = dict(default=None, aliases=['protocol'], choices=['any', 'tcp', 'udp', 'ipv6', 'esp', 'ah']), + app = dict(default=None, aliases=['name']) + ), + supports_check_mode = True, + mutually_exclusive = [['app', 'proto', 'logging']] + ) + + cmds = [] + + def execute(cmd): + cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))) + + cmds.append(cmd) + (rc, out, err) = module.run_command(cmd) + + if rc != 0: + module.fail_json(msg=err or out) + + params = module.params + + # Ensure at least one of the command arguments are given + command_keys = ['state', 'default', 'rule', 'logging'] + commands = dict((key, params[key]) for key in command_keys if params[key]) + + if len(commands) < 1: + module.fail_json(msg="Not any of the command arguments %s given" % commands) + + if('interface' in params and 'direction' not in params): + module.fail_json(msg="Direction must be specified when creating a rule on an interface") + + # Ensure ufw is available + ufw_bin = module.get_bin_path('ufw', True) + + # Save the pre state and rules in order to recognize changes + (_, pre_state, _) = module.run_command(ufw_bin + ' status verbose') + (_, pre_rules, _) = module.run_command("grep '^### tuple' /lib/ufw/user*.rules") + + # Execute commands + for (command, value) in commands.iteritems(): + cmd = [[ufw_bin], [module.check_mode, '--dry-run']] + + if command == 'state': + states = { 'enabled': 'enable', 'disabled': 'disable', + 'reloaded': 'reload', 'reset': 'reset' } + execute(cmd + [['-f'], [states[value]]]) + + elif command == 'logging': + execute(cmd + [[command], [value]]) + + elif command == 'default': + execute(cmd + [[command], [value], [params['direction']]]) + + elif command == 'rule': + # Rules are constructed according to the long format + # + # ufw [--dry-run] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \ + # [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \ + # [proto protocol] [app application] + cmd.append([module.boolean(params['delete']), 'delete']) + cmd.append([params['insert'], "insert %s" % params['insert']]) + cmd.append([value]) + cmd.append([module.boolean(params['log']), 'log']) + + for (key, template) in [('direction', "%s" ), ('interface', "on %s" ), + ('from_ip', "from %s" ), ('from_port', "port %s" ), + ('to_ip', "to %s" ), ('to_port', "port %s" ), + ('proto', "proto %s"), ('app', "app '%s'")]: + + value = params[key] + cmd.append([value, template % (value)]) + + execute(cmd) + + # Get the new state + (_, post_state, _) = module.run_command(ufw_bin + ' status verbose') + (_, post_rules, _) = 
module.run_command("grep '^### tuple' /lib/ufw/user*.rules") + changed = (pre_state != post_state) or (pre_rules != post_rules) + + return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip()) + +# import module snippets +from ansible.module_utils.basic import * + +main() diff --git a/lib/ansible/modules/extras/system/zfs b/lib/ansible/modules/extras/system/zfs new file mode 100644 index 00000000000..93248897051 --- /dev/null +++ b/lib/ansible/modules/extras/system/zfs @@ -0,0 +1,417 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Johan Wiren +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = ''' +--- +module: zfs +short_description: Manage zfs +description: + - Manages ZFS file systems on Solaris and FreeBSD. Can manage file systems, volumes and snapshots. See zfs(1M) for more information about the properties. +version_added: "1.1" +options: + name: + description: + - File system, snapshot or volume name e.g. C(rpool/myfs) + required: true + state: + description: + - Whether to create (C(present)), or remove (C(absent)) a file system, snapshot or volume. + required: true + choices: [present, absent] + aclinherit: + description: + - The aclinherit property. + required: False + choices: [discard,noallow,restricted,passthrough,passthrough-x] + aclmode: + description: + - The aclmode property. + required: False + choices: [discard,groupmask,passthrough] + atime: + description: + - The atime property. + required: False + choices: ['on','off'] + canmount: + description: + - The canmount property. + required: False + choices: ['on','off','noauto'] + casesensitivity: + description: + - The casesensitivity property. + required: False + choices: [sensitive,insensitive,mixed] + checksum: + description: + - The checksum property. + required: False + choices: ['on','off',fletcher2,fletcher4,sha256] + compression: + description: + - The compression property. + required: False + choices: ['on','off',lzjb,gzip,gzip-1,gzip-2,gzip-3,gzip-4,gzip-5,gzip-6,gzip-7,gzip-8,gzip-9,lz4,zle] + copies: + description: + - The copies property. + required: False + choices: [1,2,3] + dedup: + description: + - The dedup property. + required: False + choices: ['on','off'] + devices: + description: + - The devices property. + required: False + choices: ['on','off'] + exec: + description: + - The exec property. + required: False + choices: ['on','off'] + jailed: + description: + - The jailed property. + required: False + choices: ['on','off'] + logbias: + description: + - The logbias property. + required: False + choices: [latency,throughput] + mountpoint: + description: + - The mountpoint property. + required: False + nbmand: + description: + - The nbmand property. + required: False + choices: ['on','off'] + normalization: + description: + - The normalization property. + required: False + choices: [none,formC,formD,formKC,formKD] + primarycache: + description: + - The primarycache property. 
+ required: False + choices: [all,none,metadata] + quota: + description: + - The quota property. + required: False + readonly: + description: + - The readonly property. + required: False + choices: ['on','off'] + recordsize: + description: + - The recordsize property. + required: False + refquota: + description: + - The refquota property. + required: False + refreservation: + description: + - The refreservation property. + required: False + reservation: + description: + - The reservation property. + required: False + secondarycache: + description: + - The secondarycache property. + required: False + choices: [all,none,metadata] + setuid: + description: + - The setuid property. + required: False + choices: ['on','off'] + shareiscsi: + description: + - The shareiscsi property. + required: False + choices: ['on','off'] + sharenfs: + description: + - The sharenfs property. + required: False + sharesmb: + description: + - The sharesmb property. + required: False + snapdir: + description: + - The snapdir property. + required: False + choices: [hidden,visible] + sync: + description: + - The sync property. + required: False + choices: ['on','off'] + utf8only: + description: + - The utf8only property. + required: False + choices: ['on','off'] + volsize: + description: + - The volsize property. + required: False + volblocksize: + description: + - The volblocksize property. + required: False + vscan: + description: + - The vscan property. + required: False + choices: ['on','off'] + xattr: + description: + - The xattr property. + required: False + choices: ['on','off'] + zoned: + description: + - The zoned property. + required: False + choices: ['on','off'] +author: Johan Wiren +''' + +EXAMPLES = ''' +# Create a new file system called myfs in pool rpool +- zfs: name=rpool/myfs state=present + +# Create a new volume called myvol in pool rpool. +- zfs: name=rpool/myvol state=present volsize=10M + +# Create a snapshot of rpool/myfs file system. 
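+# (snapshots are named <filesystem>@<snapshot>; the module runs 'zfs snapshot'
+# instead of 'zfs create' whenever the name contains '@')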
+- zfs: name=rpool/myfs@mysnapshot state=present + +# Create a new file system called myfs2 with snapdir enabled +- zfs: name=rpool/myfs2 state=present snapdir=enabled +''' + + +import os + +class Zfs(object): + def __init__(self, module, name, properties): + self.module = module + self.name = name + self.properties = properties + self.changed = False + + self.immutable_properties = [ 'casesensitivity', 'normalization', 'utf8only' ] + + def exists(self): + cmd = [self.module.get_bin_path('zfs', True)] + cmd.append('list') + cmd.append('-t all') + cmd.append(self.name) + (rc, out, err) = self.module.run_command(' '.join(cmd)) + if rc == 0: + return True + else: + return False + + def create(self): + if self.module.check_mode: + self.changed = True + return + properties=self.properties + volsize = properties.pop('volsize', None) + volblocksize = properties.pop('volblocksize', None) + if "@" in self.name: + action = 'snapshot' + else: + action = 'create' + + cmd = [self.module.get_bin_path('zfs', True)] + cmd.append(action) + if volblocksize: + cmd.append('-b %s' % volblocksize) + if properties: + for prop, value in properties.iteritems(): + cmd.append('-o %s="%s"' % (prop, value)) + if volsize: + cmd.append('-V') + cmd.append(volsize) + cmd.append(self.name) + (rc, err, out) = self.module.run_command(' '.join(cmd)) + if rc == 0: + self.changed=True + else: + self.module.fail_json(msg=out) + + def destroy(self): + if self.module.check_mode: + self.changed = True + return + cmd = [self.module.get_bin_path('zfs', True)] + cmd.append('destroy') + cmd.append(self.name) + (rc, err, out) = self.module.run_command(' '.join(cmd)) + if rc == 0: + self.changed = True + else: + self.module.fail_json(msg=out) + + def set_property(self, prop, value): + if self.module.check_mode: + self.changed = True + return + cmd = self.module.get_bin_path('zfs', True) + args = [cmd, 'set', prop + '=' + value, self.name] + (rc, err, out) = self.module.run_command(args) + if rc == 0: + self.changed = True + else: + self.module.fail_json(msg=out) + + def set_properties_if_changed(self): + current_properties = self.get_current_properties() + for prop, value in self.properties.iteritems(): + if current_properties[prop] != value: + if prop in self.immutable_properties: + self.module.fail_json(msg='Cannot change property %s after creation.' % prop) + else: + self.set_property(prop, value) + + def get_current_properties(self): + def get_properties_by_name(propname): + cmd = [self.module.get_bin_path('zfs', True)] + cmd += ['get', '-H', propname, self.name] + rc, out, err = self.module.run_command(cmd) + return [l.split('\t')[1:3] for l in out.splitlines()] + properties = dict(get_properties_by_name('all')) + if 'share.*' in properties: + # Some ZFS pools list the sharenfs and sharesmb properties + # hierarchically as share.nfs and share.smb respectively. 
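+            # Drop the aggregated 'share.*' entry and query 'share.all'
+            # instead, mapping each share.nfs / share.smb value back to the
+            # flat sharenfs / sharesmb names used by this module's options.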
+ del properties['share.*'] + for p, v in get_properties_by_name('share.all'): + alias = p.replace('.', '') # share.nfs -> sharenfs (etc) + properties[alias] = v + return properties + + def run_command(self, cmd): + progname = cmd[0] + cmd[0] = module.get_bin_path(progname, True) + return module.run_command(cmd) + +def main(): + + # FIXME: should use dict() constructor like other modules, required=False is default + module = AnsibleModule( + argument_spec = { + 'name': {'required': True}, + 'state': {'required': True, 'choices':['present', 'absent']}, + 'aclinherit': {'required': False, 'choices':['discard', 'noallow', 'restricted', 'passthrough', 'passthrough-x']}, + 'aclmode': {'required': False, 'choices':['discard', 'groupmask', 'passthrough']}, + 'atime': {'required': False, 'choices':['on', 'off']}, + 'canmount': {'required': False, 'choices':['on', 'off', 'noauto']}, + 'casesensitivity': {'required': False, 'choices':['sensitive', 'insensitive', 'mixed']}, + 'checksum': {'required': False, 'choices':['on', 'off', 'fletcher2', 'fletcher4', 'sha256']}, + 'compression': {'required': False, 'choices':['on', 'off', 'lzjb', 'gzip', 'gzip-1', 'gzip-2', 'gzip-3', 'gzip-4', 'gzip-5', 'gzip-6', 'gzip-7', 'gzip-8', 'gzip-9', 'lz4', 'zle']}, + 'copies': {'required': False, 'choices':['1', '2', '3']}, + 'dedup': {'required': False, 'choices':['on', 'off']}, + 'devices': {'required': False, 'choices':['on', 'off']}, + 'exec': {'required': False, 'choices':['on', 'off']}, + # Not supported + #'groupquota': {'required': False}, + 'jailed': {'required': False, 'choices':['on', 'off']}, + 'logbias': {'required': False, 'choices':['latency', 'throughput']}, + 'mountpoint': {'required': False}, + 'nbmand': {'required': False, 'choices':['on', 'off']}, + 'normalization': {'required': False, 'choices':['none', 'formC', 'formD', 'formKC', 'formKD']}, + 'primarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, + 'quota': {'required': False}, + 'readonly': {'required': False, 'choices':['on', 'off']}, + 'recordsize': {'required': False}, + 'refquota': {'required': False}, + 'refreservation': {'required': False}, + 'reservation': {'required': False}, + 'secondarycache': {'required': False, 'choices':['all', 'none', 'metadata']}, + 'setuid': {'required': False, 'choices':['on', 'off']}, + 'shareiscsi': {'required': False, 'choices':['on', 'off']}, + 'sharenfs': {'required': False}, + 'sharesmb': {'required': False}, + 'snapdir': {'required': False, 'choices':['hidden', 'visible']}, + 'sync': {'required': False, 'choices':['on', 'off']}, + # Not supported + #'userquota': {'required': False}, + 'utf8only': {'required': False, 'choices':['on', 'off']}, + 'volsize': {'required': False}, + 'volblocksize': {'required': False}, + 'vscan': {'required': False, 'choices':['on', 'off']}, + 'xattr': {'required': False, 'choices':['on', 'off']}, + 'zoned': {'required': False, 'choices':['on', 'off']}, + }, + supports_check_mode=True + ) + + state = module.params.pop('state') + name = module.params.pop('name') + + # Get all valid zfs-properties + properties = dict() + for prop, value in module.params.iteritems(): + if prop in ['CHECKMODE']: + continue + if value: + properties[prop] = value + + result = {} + result['name'] = name + result['state'] = state + + zfs=Zfs(module, name, properties) + + if state == 'present': + if zfs.exists(): + zfs.set_properties_if_changed() + else: + zfs.create() + + elif state == 'absent': + if zfs.exists(): + zfs.destroy() + + result.update(zfs.properties) + 
result['changed'] = zfs.changed + module.exit_json(**result) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/web_infrastructure/ejabberd_user b/lib/ansible/modules/extras/web_infrastructure/ejabberd_user new file mode 100755 index 00000000000..d8b0384679c --- /dev/null +++ b/lib/ansible/modules/extras/web_infrastructure/ejabberd_user @@ -0,0 +1,214 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright (C) 2013, Peter Sprygada +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . +# +DOCUMENTATION = ''' +--- +module: ejabberd_user +version_added: "1.5" +author: Peter Sprygada +short_description: Manages users for ejabberd servers +requirements: + - ejabberd with mod_admin_extra +description: + - This module provides user management for ejabberd servers +options: + username: + description: + - the name of the user to manage + required: true + host: + description: + - the ejabberd host associated with this username + required: true + password: + description: + - the password to assign to the username + required: false + logging: + description: + - enables or disables the local syslog facility for this module + required: false + default: false + choices: [ 'true', 'false', 'yes', 'no' ] + state: + description: + - describe the desired state of the user to be managed + required: false + default: 'present' + choices: [ 'present', 'absent' ] +notes: + - Password parameter is required for state == present only + - Passwords must be stored in clear text for this release + - The ejabberd configuration file must include mod_admin_extra as a module. +''' +EXAMPLES = ''' +Example playbook entries using the ejabberd_user module to manage users state. + + tasks: + + - name: create a user if it does not exists + action: ejabberd_user username=test host=server password=password + + - name: delete a user if it exists + action: ejabberd_user username=test host=server state=absent +''' +import syslog + +class EjabberdUserException(Exception): + """ Base exeption for EjabberdUser class object """ + pass + +class EjabberdUser(object): + """ This object represents a user resource for an ejabberd server. The + object manages user creation and deletion using ejabberdctl. The following + commands are currently supported: + * ejabberdctl register + * ejabberdctl deregister + """ + + def __init__(self, module): + self.module = module + self.logging = module.params.get('logging') + self.state = module.params.get('state') + self.host = module.params.get('host') + self.user = module.params.get('username') + self.pwd = module.params.get('password') + + @property + def changed(self): + """ This method will check the current user and see if the password has + changed. 
It will return True if the user does not match the supplied + credentials and False if it does not + """ + try: + options = [self.user, self.host, self.pwd] + (rc, out, err) = self.run_command('check_password', options) + except EjabberdUserException, e: + (rc, out, err) = (1, None, "required attribute(s) missing") + return rc + + @property + def exists(self): + """ This method will check to see if the supplied username exists for + host specified. If the user exists True is returned, otherwise False + is returned + """ + try: + options = [self.user, self.host] + (rc, out, err) = self.run_command('check_account', options) + except EjabberdUserException, e: + (rc, out, err) = (1, None, "required attribute(s) missing") + return True if rc == 0 else False + + def log(self, entry): + """ This method will log information to the local syslog facility """ + if self.logging: + syslog.openlog('ansible-%s' % os.path.basename(__file__)) + syslog.syslog(syslog.LOG_NOTICE, entry) + + def run_command(self, cmd, options): + """ This method will run the any command specified and return the + returns using the Ansible common module + """ + if not all(options): + raise EjabberdUserException + + cmd = 'ejabberdctl %s ' % cmd + cmd += " ".join(options) + self.log('command: %s' % cmd) + return self.module.run_command(cmd.split()) + + def update(self): + """ The update method will update the credentials for the user provided + """ + try: + options = [self.user, self.host, self.pwd] + (rc, out, err) = self.run_command('change_password', options) + except EjabberdUserException, e: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + + def create(self): + """ The create method will create a new user on the host with the + password provided + """ + try: + options = [self.user, self.host, self.pwd] + (rc, out, err) = self.run_command('register', options) + except EjabberdUserException, e: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + + def delete(self): + """ The delete method will delete the user from the host + """ + try: + options = [self.user, self.host] + (rc, out, err) = self.run_command('unregister', options) + except EjabberdUserException, e: + (rc, out, err) = (1, None, "required attribute(s) missing") + return (rc, out, err) + +def main(): + module = AnsibleModule( + argument_spec = dict( + host=dict(default=None, type='str'), + username=dict(default=None, type='str'), + password=dict(default=None, type='str'), + state=dict(default='present', choices=['present', 'absent']), + logging=dict(default=False, type='bool') + ), + supports_check_mode = True + ) + + obj = EjabberdUser(module) + + rc = None + result = dict() + + if obj.state == 'absent': + if obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.delete() + if rc != 0: + module.fail_json(msg=err, rc=rc) + + elif obj.state == 'present': + if not obj.exists: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.create() + elif obj.changed: + if module.check_mode: + module.exit_json(changed=True) + (rc, out, err) = obj.update() + if rc is not None and rc != 0: + module.fail_json(msg=err, rc=rc) + + if rc is None: + result['changed'] = False + else: + result['changed'] = True + + module.exit_json(**result) + + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/web_infrastructure/jboss b/lib/ansible/modules/extras/web_infrastructure/jboss new file mode 
100644 index 00000000000..9478235698c --- /dev/null +++ b/lib/ansible/modules/extras/web_infrastructure/jboss @@ -0,0 +1,140 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2013, Jeroen Hoekx +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . + +DOCUMENTATION = """ +module: jboss +version_added: "1.4" +short_description: deploy applications to JBoss +description: + - Deploy applications to JBoss standalone using the filesystem +options: + deployment: + required: true + description: + - The name of the deployment + src: + required: false + description: + - The remote path of the application ear or war to deploy + deploy_path: + required: false + default: /var/lib/jbossas/standalone/deployments + description: + - The location in the filesystem where the deployment scanner listens + state: + required: false + choices: [ present, absent ] + default: "present" + description: + - Whether the application should be deployed or undeployed +notes: + - "The JBoss standalone deployment-scanner has to be enabled in standalone.xml" + - "Ensure no identically named application is deployed through the JBoss CLI" +author: Jeroen Hoekx +""" + +EXAMPLES = """ +# Deploy a hello world application +- jboss: src=/tmp/hello-1.0-SNAPSHOT.war deployment=hello.war state=present +# Update the hello world application +- jboss: src=/tmp/hello-1.1-SNAPSHOT.war deployment=hello.war state=present +# Undeploy the hello world application +- jboss: deployment=hello.war state=absent +""" + +import os +import shutil +import time + +def is_deployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.deployed"%(deployment))) + +def is_undeployed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.undeployed"%(deployment))) + +def is_failed(deploy_path, deployment): + return os.path.exists(os.path.join(deploy_path, "%s.failed"%(deployment))) + +def main(): + module = AnsibleModule( + argument_spec = dict( + src=dict(), + deployment=dict(required=True), + deploy_path=dict(default='/var/lib/jbossas/standalone/deployments'), + state=dict(choices=['absent', 'present'], default='present'), + ), + ) + + changed = False + + src = module.params['src'] + deployment = module.params['deployment'] + deploy_path = module.params['deploy_path'] + state = module.params['state'] + + if state == 'present' and not src: + module.fail_json(msg="Argument 'src' required.") + + if not os.path.exists(deploy_path): + module.fail_json(msg="deploy_path does not exist.") + + deployed = is_deployed(deploy_path, deployment) + + if state == 'present' and not deployed: + if not os.path.exists(src): + module.fail_json(msg='Source file %s does not exist.'%(src)) + if is_failed(deploy_path, deployment): + ### Clean up old failed deployment + os.remove(os.path.join(deploy_path, "%s.failed"%(deployment))) + + shutil.copyfile(src, os.path.join(deploy_path, deployment)) + while not deployed: + deployed = is_deployed(deploy_path, 
deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.'%(deployment)) + time.sleep(1) + changed = True + + if state == 'present' and deployed: + if module.md5(src) != module.md5(os.path.join(deploy_path, deployment)): + os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) + shutil.copyfile(src, os.path.join(deploy_path, deployment)) + deployed = False + while not deployed: + deployed = is_deployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Deploying %s failed.'%(deployment)) + time.sleep(1) + changed = True + + if state == 'absent' and deployed: + os.remove(os.path.join(deploy_path, "%s.deployed"%(deployment))) + while deployed: + deployed = not is_undeployed(deploy_path, deployment) + if is_failed(deploy_path, deployment): + module.fail_json(msg='Undeploying %s failed.'%(deployment)) + time.sleep(1) + changed = True + + module.exit_json(changed=changed) + +# import module snippets +from ansible.module_utils.basic import * +main() diff --git a/lib/ansible/modules/extras/web_infrastructure/jira b/lib/ansible/modules/extras/web_infrastructure/jira new file mode 100644 index 00000000000..950fc3dbfcf --- /dev/null +++ b/lib/ansible/modules/extras/web_infrastructure/jira @@ -0,0 +1,347 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2014, Steve Smith +# Atlassian open-source approval reference OSR-76. +# +# This file is part of Ansible. +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see . +# + +DOCUMENTATION = """ +module: jira +version_added: "1.6" +short_description: create and modify issues in a JIRA instance +description: + - Create and modify issues in a JIRA instance. + +options: + uri: + required: true + description: + - Base URI for the JIRA instance + + operation: + required: true + aliases: [ command ] + choices: [ create, comment, edit, fetch, transition ] + description: + - The operation to perform. + + username: + required: true + description: + - The username to log-in with. + + password: + required: true + description: + - The password to log-in with. + + project: + aliases: [ prj ] + required: false + description: + - The project for this operation. Required for issue creation. + + summary: + required: false + description: + - The issue summary, where appropriate. + + description: + required: false + description: + - The issue description, where appropriate. + + issuetype: + required: false + description: + - The issue type, for issue creation. + + issue: + required: false + description: + - An existing issue key to operate on. + + comment: + required: false + description: + - The comment text to add. + + status: + required: false + description: + - The desired status; only relevant for the transition operation. + + assignee: + required: false + description: + - Sets the assignee on create or transition operations. Note not all transitions will allow this. 
+ + fields: + required: false + description: + - This is a free-form data structure that can contain arbitrary data. This is passed directly to the JIRA REST API (possibly after merging with other required data, as when passed to create). See examples for more information, and the JIRA REST API for the structure required for various fields. + +notes: + - "Currently this only works with basic-auth." + +author: Steve Smith +""" + +EXAMPLES = """ +# Create a new issue and add a comment to it: +- name: Create an issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=create + summary="Example Issue" description="Created using Ansible" issuetype=Task + register: issue + +- name: Comment on issue + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=comment + comment="A comment added by Ansible" + +# Assign an existing issue using edit +- name: Assign an issue using free-form fields + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + assignee=ssmith + +# Create an issue with an existing assignee +- name: Create an assigned issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=create + summary="Assigned issue" description="Created and assigned using Ansible" + issuetype=Task assignee=ssmith + +# Edit an issue using free-form fields +- name: Set the labels on an issue using free-form fields + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + args: { fields: {labels: ["autocreated", "ansible"]}} + +- name: Set the labels on an issue, YAML version + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=edit + args: + fields: + labels: + - "autocreated" + - "ansible" + - "yaml" + +# Retrieve metadata for an issue and use it to create an account +- name: Get an issue + jira: uri={{server}} username={{user}} password={{pass}} + project=ANS operation=fetch issue="ANS-63" + register: issue + +- name: Create a unix account for the reporter + sudo: true + user: name="{{issue.meta.fields.creator.name}}" comment="{{issue.meta.fields.creator.displayName}}" + +# Transition an issue by target status +- name: Close the issue + jira: uri={{server}} username={{user}} password={{pass}} + issue={{issue.meta.key}} operation=transition status="Done" +""" + +import json +import base64 + +def request(url, user, passwd, data=None, method=None): + if data: + data = json.dumps(data) + + # NOTE: fetch_url uses a password manager, which follows the + # standard request-then-challenge basic-auth semantics. However as + # JIRA allows some unauthorised operations it doesn't necessarily + # send the challenge, so the request occurs as the anonymous user, + # resulting in unexpected results. To work around this we manually + # inject the basic-auth header up-front to ensure that JIRA treats + # the requests as authorized for this user. 
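+    # The header built below is plain HTTP basic auth: base64("user:password"),
+    # sent pre-emptively rather than in response to a 401 challenge.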
+ auth = base64.encodestring('%s:%s' % (user, passwd)).replace('\n', '') + response, info = fetch_url(module, url, data=data, method=method, + headers={'Content-Type':'application/json', + 'Authorization':"Basic %s" % auth}) + + if info['status'] not in (200, 204): + module.fail_json(msg=info['msg']) + + body = response.read() + + if body: + return json.loads(body) + else: + return {} + +def post(url, user, passwd, data): + return request(url, user, passwd, data=data, method='POST') + +def put(url, user, passwd, data): + return request(url, user, passwd, data=data, method='PUT') + +def get(url, user, passwd): + return request(url, user, passwd) + + +def create(restbase, user, passwd, params): + createfields = { + 'project': { 'key': params['project'] }, + 'summary': params['summary'], + 'description': params['description'], + 'issuetype': { 'name': params['issuetype'] }} + + # Merge in any additional or overridden fields + if params['fields']: + createfields.update(params['fields']) + + data = {'fields': createfields} + + url = restbase + '/issue/' + + ret = post(url, user, passwd, data) + + return ret + + +def comment(restbase, user, passwd, params): + data = { + 'body': params['comment'] + } + + url = restbase + '/issue/' + params['issue'] + '/comment' + + ret = post(url, user, passwd, data) + + return ret + + +def edit(restbase, user, passwd, params): + data = { + 'fields': params['fields'] + } + + url = restbase + '/issue/' + params['issue'] + + ret = put(url, user, passwd, data) + + return ret + + +def fetch(restbase, user, passwd, params): + url = restbase + '/issue/' + params['issue'] + ret = get(url, user, passwd) + return ret + + +def transition(restbase, user, passwd, params): + # Find the transition id + turl = restbase + '/issue/' + params['issue'] + "/transitions" + tmeta = get(turl, user, passwd) + + target = params['status'] + tid = None + for t in tmeta['transitions']: + if t['name'] == target: + tid = t['id'] + break + + if not tid: + raise ValueError("Failed find valid transition for '%s'" % target) + + # Perform it + url = restbase + '/issue/' + params['issue'] + "/transitions" + data = { 'transition': { "id" : tid }, + 'fields': params['fields']} + + ret = post(url, user, passwd, data) + + return ret + + +# Some parameters are required depending on the operation: +OP_REQUIRED = dict(create=['project', 'issuetype', 'summary', 'description'], + comment=['issue', 'comment'], + edit=[], + fetch=['issue'], + transition=['status']) + +def main(): + + global module + module = AnsibleModule( + argument_spec=dict( + uri=dict(required=True), + operation=dict(choices=['create', 'comment', 'edit', 'fetch', 'transition'], + aliases=['command'], required=True), + username=dict(required=True), + password=dict(required=True), + project=dict(), + summary=dict(), + description=dict(), + issuetype=dict(), + issue=dict(aliases=['ticket']), + comment=dict(), + status=dict(), + assignee=dict(), + fields=dict(default={}) + ), + supports_check_mode=False + ) + + op = module.params['operation'] + + # Check we have the necessary per-operation parameters + missing = [] + for parm in OP_REQUIRED[op]: + if not module.params[parm]: + missing.append(parm) + if missing: + module.fail_json(msg="Operation %s require the following missing parameters: %s" % (op, ",".join(missing))) + + # Handle rest of parameters + uri = module.params['uri'] + user = module.params['username'] + passwd = module.params['password'] + if module.params['assignee']: + module.params['fields']['assignee'] = { 'name': 
module.params['assignee'] } + + if not uri.endswith('/'): + uri = uri+'/' + restbase = uri + 'rest/api/2' + + # Dispatch + try: + + # Lookup the corresponding method for this operation. This is + # safe as the AnsibleModule should remove any unknown operations. + thismod = sys.modules[__name__] + method = getattr(thismod, op) + + ret = method(restbase, user, passwd, module.params) + + except Exception as e: + return module.fail_json(msg=e.message) + + + module.exit_json(changed=True, meta=ret) + + +from ansible.module_utils.basic import * +from ansible.module_utils.urls import * +main()
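For readers unfamiliar with the JIRA workflow API, the short sketch below restates, outside of Ansible, the status-name-to-transition-id lookup that the jira module's transition() function performs before posting a transition. It is illustrative only and not part of the patch above; the payload shape is inferred from the module code and the sample data is invented.

    # Illustrative sketch -- mirrors the lookup done in transition() above.
    def find_transition_id(transitions_payload, target_status):
        # Return the id of the transition whose name matches target_status,
        # or None if no such transition is offered for the issue.
        for t in transitions_payload.get('transitions', []):
            if t['name'] == target_status:
                return t['id']
        return None

    if __name__ == '__main__':
        # Hypothetical response shape; the real data would come from
        # GET <uri>/rest/api/2/issue/<key>/transitions using basic auth.
        sample = {'transitions': [{'id': '11', 'name': 'To Do'},
                                  {'id': '31', 'name': 'Done'}]}
        print(find_transition_id(sample, 'Done'))   # prints: 31

If the requested status is misspelled or not reachable from the issue's current state, the lookup returns None, which is the case the module turns into a ValueError and reports via fail_json.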