diff --git a/plugin.video.cc.com/LICENSE.txt b/plugin.video.cc.com/LICENSE.txt
new file mode 100644
index 0000000000..9cecc1d466
--- /dev/null
+++ b/plugin.video.cc.com/LICENSE.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ {one line to give the program's name and a brief idea of what it does.}
+ Copyright (C) {year} {name of author}
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ {project} Copyright (C) {year} {fullname}
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/plugin.video.cc.com/addon.py b/plugin.video.cc.com/addon.py
new file mode 100644
index 0000000000..2b5e03c6b6
--- /dev/null
+++ b/plugin.video.cc.com/addon.py
@@ -0,0 +1,5 @@
+# author: nixxo
+from resources.lib.main import ComedyCentral
+
+cc = ComedyCentral()
+cc.main()
diff --git a/plugin.video.cc.com/addon.xml b/plugin.video.cc.com/addon.xml
new file mode 100644
index 0000000000..48fc6f377f
--- /dev/null
+++ b/plugin.video.cc.com/addon.xml
@@ -0,0 +1,67 @@
+
+
+
+
+
+
+
+
+
+ video
+
+
+ Watch full episodes and clips from your favorite Comedy Central shows.
+ Comedy Central brings you the funniest stuff on the planet. Watch hit shows like Workaholics, Tosh.0, The Daily Show with Trevor Noah, Key and Peele, @midnight and Broad City, plus cutting-edge stand-up comedy you won't find anywhere else. Head to CC.com for full episodes, exclusives, previews and more.
+ all
+ GNU GENERAL PUBLIC LICENSE. Version 3, June 2007
+ https://github.com/nixxo/plugin.video.cc.com
+ https://www.cc.com/
+ [B]1.0.0[/B]
+- first release on kodi addons repo
+- final fixes
+
+[B]0.0.7[/B]
+- improved thumbnail setup
+- improved setContent
+- improved info extraction to avoid NoneType errors
+- added offscreen=True to listitem creation
+- created getPlaylist
+- improved settings
+- using f-strings
+- urllib > requests
+- moved videoInfo update before loop
+- addonutils improvements
+
+[B]v0.0.6[/B]
+- improved translations
+- made settings.xml with kodi19 conventions
+- implemented better quality selection
+- added "Full Episodes" menu item
+
+[B]v0.0.5[/B]
+- added inputstream helper option
+- fixed single video title
+- settings cleanup
+
+[B]v0.0.4[/B]
+- using cache to save video info
+- added duration/airdate to videos
+- code improvements and cleanup
+
+[B]v0.0.3[/B]
+- yt-dlp updated to 2021.10.22
+- pull listitem infos to set plot on all playlist elements
+- improved settings and localization
+
+[B]v0.0.2[/B]
+- playlist index fix
+- strptime workaround
+
+[B]v0.0.1[/B]
+- first release
+
+ resources/icon.png
+ resources/fanart.png
+
+
+
diff --git a/plugin.video.cc.com/resources/fanart.png b/plugin.video.cc.com/resources/fanart.png
new file mode 100644
index 0000000000..1e02375162
Binary files /dev/null and b/plugin.video.cc.com/resources/fanart.png differ
diff --git a/plugin.video.cc.com/resources/icon.png b/plugin.video.cc.com/resources/icon.png
new file mode 100644
index 0000000000..60cb455044
Binary files /dev/null and b/plugin.video.cc.com/resources/icon.png differ
diff --git a/plugin.video.cc.com/resources/language/resource.language.en_gb/strings.po b/plugin.video.cc.com/resources/language/resource.language.en_gb/strings.po
new file mode 100644
index 0000000000..4d26c498ad
--- /dev/null
+++ b/plugin.video.cc.com/resources/language/resource.language.en_gb/strings.po
@@ -0,0 +1,115 @@
+# Kodi Media Center language file
+# Addon Name: "Comedy Central"
+# Addon id: plugin.video.cc.com
+# Addon Provider: nixxo
+msgid ""
+msgstr ""
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"Language: en\n"
+
+msgctxt "#30000"
+msgid "General"
+msgstr ""
+
+msgctxt "#31000"
+msgid "Playback"
+msgstr ""
+
+msgctxt "#31001"
+msgid "Use InputStream Adaptive"
+msgstr ""
+
+msgctxt "#31002"
+msgid "Manual quality selection"
+msgstr ""
+
+msgctxt "#31003"
+msgid "Select Preferred Maximum Quality"
+msgstr ""
+
+msgctxt "#31004"
+msgid "Low definition - 360p"
+msgstr ""
+
+msgctxt "#31005"
+msgid "Standard definition - 540p"
+msgstr ""
+
+msgctxt "#31006"
+msgid "High definition - 720p"
+msgstr ""
+
+msgctxt "#31007"
+msgid "Full HD - 1080p"
+msgstr ""
+
+msgctxt "#31008"
+msgid "Highest Available"
+msgstr ""
+
+msgctxt "#31010"
+msgid "Debug"
+msgstr ""
+
+msgctxt "#31011"
+msgid "Enable developer mode"
+msgstr ""
+
+msgctxt "#32001"
+msgid "Shows"
+msgstr ""
+
+msgctxt "#32002"
+msgid "Full Episodes"
+msgstr ""
+
+msgctxt "#32003"
+msgid "Standup"
+msgstr ""
+
+msgctxt "#32004"
+msgid "Digital Originals"
+msgstr ""
+
+msgctxt "#32005"
+msgid "Load More"
+msgstr ""
+
+msgctxt "#33001"
+msgid "Url not supported, check log."
+msgstr ""
+
+msgctxt "#33002"
+msgid "No json data found, check log."
+msgstr ""
+
+msgctxt "#33003"
+msgid "Video not available."
+msgstr ""
+
+msgctxt "#33004"
+msgid "Something went wrong, Try again..."
+msgstr ""
+
+msgctxt "#33005"
+msgid "_type not supported. See log."
+msgstr ""
+
+msgctxt "#41001"
+msgid "Use InputStream Adaptive and let it manage quality."
+msgstr ""
+
+msgctxt "#41002"
+msgid "Force maximum quality to InputStream Adaptive as well."
+msgstr ""
+
+msgctxt "#41003"
+msgid "Maximum quality to be used by the plugin."
+msgstr ""
+
+msgctxt "#41011"
+msgid "Enable developer mode to increase verbosity of logging and set everything to LOG_INFO level and avoid enableing Kodi Debug mode."
+msgstr ""
diff --git a/plugin.video.cc.com/resources/lib/addonutils.py b/plugin.video.cc.com/resources/lib/addonutils.py
new file mode 100644
index 0000000000..8f7abaf8e5
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/addonutils.py
@@ -0,0 +1,163 @@
+import os
+import sys
+from urllib.parse import parse_qsl
+from urllib.parse import urlencode
+
+import xbmc
+import xbmcaddon
+import xbmcgui
+import xbmcplugin
+import xbmcvfs
+
+ADDON = xbmcaddon.Addon()
+ID = ADDON.getAddonInfo('id')
+NAME = ADDON.getAddonInfo('name')
+VERSION = ADDON.getAddonInfo('version')
+ICON = ADDON.getAddonInfo('icon')
+FANART = ADDON.getAddonInfo('fanart')
+PATH = ADDON.getAddonInfo('path')
+DATA_PATH = ADDON.getAddonInfo('profile')
+PATH_T = xbmcvfs.translatePath(PATH)
+DATA_PATH_T = xbmcvfs.translatePath(DATA_PATH)
+IMAGE_PATH_T = os.path.join(PATH_T, 'resources', 'media')
+LANGUAGE = ADDON.getLocalizedString
+KODILANGUAGE = xbmc.getLocalizedString
+
+HANDLE = int(sys.argv[1])
+
+
+def executebuiltin(func, block=False):
+ xbmc.executebuiltin(func, block)
+
+
+def notify(msg):
+ xbmcgui.Dialog().notification(NAME, msg, ICON)
+
+
+def log(msg, level=xbmc.LOGDEBUG):
+ # DEBUG = 0, INFO = 1, WARNING = 2, ERROR = 3, FATAL = 4
+ xbmc.log(f"[{ID}/{VERSION}] {msg}", level=level)
+
+
+def getParams():
+ if not sys.argv[2]:
+ return {}
+ return dict(parse_qsl(sys.argv[2][1:]))
+
+
+def parameters(p, host=sys.argv[0]):
+ for k, v in list(p.items()):
+ if v:
+ p[k] = v
+ else:
+ p.pop(k, None)
+ return f"{host}?{urlencode(p)}"
+
+
+def getSetting(setting):
+ return ADDON.getSetting(setting).strip()
+
+
+def getSettingAsBool(setting):
+ return getSetting(setting).lower() == 'true'
+
+
+def getSettingAsNum(setting):
+ num = 0
+ try:
+ num = float(getSetting(setting))
+ except ValueError:
+ pass
+ return num
+
+
+def getSettingAsInt(setting):
+ return int(getSettingAsNum(setting))
+
+
+def setSetting(setting, value):
+ ADDON.setSetting(id=setting, value=str(value))
+
+
+def showOkDialog(line, heading=NAME):
+ xbmcgui.Dialog().ok(heading, line)
+
+
+def createListItem(
+ label='', params=None, label2=None,
+ thumb=None, fanart=None, poster=None, arts={},
+ videoInfo=None, properties={}, isFolder=True,
+ path=None, subs=None):
+ item = xbmcgui.ListItem(label, label2, path, offscreen=True)
+ if thumb:
+ arts['thumb'] = thumb
+ if fanart:
+ arts['fanart'] = fanart
+ if poster:
+ arts['poster'] = poster
+ item.setArt(arts)
+ item.setInfo('video', videoInfo)
+ if subs is not None:
+ item.setSubtitles(subs)
+ if not isFolder:
+ properties['IsPlayable'] = 'true'
+ for key, value in list(properties.items()):
+ item.setProperty(key, value)
+ return item
+
+
+def addListItem(
+ label='', params=None, label2=None,
+ thumb=None, fanart=None, poster=None, arts={},
+ videoInfo=None, properties={}, isFolder=True,
+ path=None, subs=None):
+ if isinstance(params, dict):
+ url = parameters(params)
+ else:
+ url = params
+ item = createListItem(
+ label=label, params=params, label2=label2,
+ thumb=thumb, fanart=fanart, poster=poster, arts=arts,
+ videoInfo=videoInfo, properties=properties, isFolder=isFolder,
+ path=path, subs=subs)
+ return xbmcplugin.addDirectoryItem(
+ handle=HANDLE, url=url, listitem=item, isFolder=isFolder)
+
+
+def getPlaylist(type=xbmc.PLAYLIST_VIDEO, clear=True):
+ plst = xbmc.PlayList(type)
+ if clear:
+ plst.clear()
+ xbmc.sleep(200)
+ return plst
+
+
+def setResolvedUrl(
+ url='', solved=True, headers=None, subs=None,
+ item=None, exit=True):
+ headerUrl = ''
+ if headers:
+ headerUrl = urlencode(headers)
+ item = xbmcgui.ListItem(
+ path=f"{url}|{headerUrl}", offscreen=True) if item is None else item
+ if subs is not None:
+ item.setSubtitles(subs)
+ xbmcplugin.setResolvedUrl(HANDLE, solved, item)
+ if exit:
+ sys.exit(0)
+
+
+def setContent(ctype='videos'):
+ xbmcplugin.setContent(HANDLE, ctype)
+
+
+def endScript(message=None, loglevel=2, closedir=True, exit=True):
+ if message:
+ log(message, loglevel)
+ if closedir:
+ xbmcplugin.endOfDirectory(handle=HANDLE, succeeded=True)
+ if exit:
+ sys.exit(0)
+
+
+log(f"Starting with command \"{sys.argv[2]}\"", 1)
diff --git a/plugin.video.cc.com/resources/lib/comedycentral.py b/plugin.video.cc.com/resources/lib/comedycentral.py
new file mode 100644
index 0000000000..ba45309c80
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/comedycentral.py
@@ -0,0 +1,614 @@
+import datetime
+import re
+import json
+import requests
+
+from simplecache import SimpleCache
+
+from resources.lib import addonutils
+from resources.lib.translate import translatedString as T
+
+
+TIMEOUT = 15
+QUALITY = addonutils.getSettingAsInt('Quality')
+QUALITIES = [360, 540, 720, 1080, 9999]
+DEVMODE = addonutils.getSettingAsBool('DevMode')
+BASE_URL = 'https://www.cc.com'
+BASE_MGID = 'mgid:arc:video:comedycentral.com:'
+PAGES_CRUMB = ['topic', 'collections', 'shows']
+LANG = addonutils.LANGUAGE
+MAIN_MENU = [{
+ 'label': T('shows'),
+ 'params': {
+ 'url': f"{BASE_URL}/api/shows/1/40",
+ 'mode': 'SHOWS',
+ },
+}, {
+ 'label': T('full.episodes'),
+ 'params': {
+ 'url': f"{BASE_URL}/api/episodes/1/40",
+ 'mode': 'EPISODES',
+ },
+}, {
+ 'label': T('standup'),
+ 'params': {
+ 'url': f"{BASE_URL}/topic/stand-up",
+ 'mode': 'GENERIC',
+ 'name': T('standup'),
+ },
+}, {
+ 'label': T('digital.original'),
+ 'params': {
+ 'url': f"{BASE_URL}/topic/digital-originals",
+ 'mode': 'GENERIC',
+ 'name': T('digital.original'),
+ },
+}]
+
+
+class CC(object):
+
+ def __init__(self):
+ self._log('__init__')
+ self.cache = SimpleCache()
+
+ def _log(self, msg, level=0):
+ """
+ Log message
+ If DEVMODE is enabled, all debug messages are raised to INFO,
+ so everything from the plugin is visible without
+ activating Debug Log in Kodi.
+
+ :param msg: The message
+ :type msg: str
+ :param level: loglevel
+ :type level: int
+ """
+ if DEVMODE:
+ addonutils.log(msg, 1 if level == 0 else level)
+ elif level >= 3:
+ addonutils.log(msg, level)
+
+ def _openURL(self, url, hours=24):
+ """
+ Get url content from cache or from source,
+ depending on whether a cached copy is available.
+
+ :param url: The url
+ :type url: str
+ :param hours: cache retention period in hours
+ :type hours: int
+
+ :returns: url content
+ :rtype: str
+ """
+ self._log(f"openURL, url = {url}", 1)
+ try:
+ cacheresponse = self.cache.get(
+ f"{addonutils.ID}._openURL, url = {url}")
+ if not cacheresponse:
+ self._log('openURL, no cache found')
+ response = requests.get(url, timeout=TIMEOUT)
+ if response.status_code == requests.codes.ok:
+ response.encoding = 'utf-8'
+ self.cache.set(
+ f"{addonutils.ID}._openURL, url = {url}",
+ response.text,
+ expiration=datetime.timedelta(hours=hours))
+ else:
+ response.raise_for_status()
+ return self.cache.get(f"{addonutils.ID}._openURL, url = {url}")
+ except Exception as e:
+ self.cache = None
+ self._log(f"openURL Failed! {e}", 3)
+ addonutils.notify(T('error.openurl'))
+ addonutils.endScript()
+
+ def _createURL(self, url, fix=False):
+ """
+ Check if url is full or only partial
+
+ :param url: The url
+ :type url: str
+ :param fix: fix the url
+ :type fix: bool
+
+ :returns: fixed url
+ :rtype: str
+ """
+ if fix:
+ # sometimes cc.com f**ks-up the url
+ url = url.replace('/episode/', '/episodes/')
+
+ if url.startswith('http'):
+ return url
+ return f"{BASE_URL}{url}"
+
+ def _createInfoArt(self, image=False, fanart=False):
+ """
+ Create infoart dict from provided image url, if provided.
+
+ :param image: image url
+ :type image: str
+ :param fanart: generate fanart from image url
+ :type fanart: bool
+
+ :returns: infoart
+ :rtype: dict
+ """
+ self._log(f"_createInfoArt, image = {image}; fanart = {fanart}", 1)
+ thumb = f"{image}&width=512&crop=false" if image else None
+ return {
+ 'thumb': thumb,
+ 'poster': thumb,
+ 'fanart': (image or thumb) if fanart else addonutils.FANART,
+ 'icon': addonutils.ICON,
+ 'logo': addonutils.ICON
+ }
+
+ def _loadJsonData(self, url, hours=24):
+ """
+ Extract the JSON data from the provided url.
+ Checks if the url contains html or json
+
+ :param url: The url with the data to extract
+ :type url: str
+ :param hours: cache retention duration
+ :type hours: int
+
+ :returns: Json data extracted
+ :rtype: json
+ """
+ self._log(f"_loadJsonData, url = {url}", 1)
+ response = self._openURL(url, hours=hours)
+ if len(response) == 0:
+ return
+
+ try:
+ # check if the file is json
+ items = json.loads(response)
+ except:
+ # file is html
+ try:
+ src = re.search('__DATA__\\s*=\\s*(.+?);\\s*window\\.__PUSH_STATE__', response).group(1)
+ items = json.loads(src)
+ except Exception as e:
+ addonutils.notify(T('error.no.json'))
+ self._log(f"_loadJsonData, NO JSON DATA FOUND: {e}", 3)
+ addonutils.endScript()
+
+ return items
+
+ def _extractItemType(self, data, type, ext):
+ """
+ Search for element with the 'type' provided and return 'ext'
+ Eg. return the "children" element of an element with 'type' "MainContent"
+
+ :param data: The data
+ :type data: json
+ :param type: 'type' key to search
+ :type type: str
+ :param ext: 'ext' key to extract
+ :type ext: str
+
+ :returns: extracted data
+ :rtype: json
+ """
+ self._log(f"_extractItemType, type = {type}; ext = {ext}", 1)
+ items = [x.get(ext) for x in data if x.get('type') == type]
+ return items[0] if len(items) > 0 and isinstance(items[0], list) else items
+
+ def _extractItems(self, data):
+ """
+ extract items from the json data
+
+ :param data: The data
+ :type data: json
+
+ :returns: items extracted
+ :rtype: list
+ """
+ self._log('_extractItems')
+ items = []
+ for item in data or []:
+ if item.get('type') == 'LineList':
+ items.extend(item['props'].get('items') or [])
+ items.extend([item['props'].get('loadMore')] or [])
+ if item.get('type') == 'Fragment':
+ items.extend(self._extractItems(item.get('children')) or [])
+ self._log(f"_extractItems, items extracted = {len(items)}", 1)
+ return items
+
+ def _getDuration(self, duration):
+ """
+ Parse the duration in format [hh:]mm:ss and return in seconds
+
+ :param duration: The duration
+ :type duration: int
+ """
+ try:
+ hh, mm, ss = re.match(r'(?:(\d+):)?(\d+):(\d+)', duration).groups()
+ except:
+ hh, mm, ss = '0,0,0'.split(',')
+ return int(hh or 0) * 3600 + int(mm or 0) * 60 + int(ss or 0)
+
+ def _getDate(self, date):
+ """
+ Parses the date in the format MM/DD/YYYY and returns it in
+ the format YYYY-MM-DD
+
+ :param date: The date
+ :type date: str
+ """
+ try:
+ mm, dd, yy = re.match(r'(\d{2})/(\d{2})/(\d{4})', date).groups()
+ except:
+ mm, dd, yy = '01,01,2000'.split(',')
+ return f"{yy}-{mm}-{dd}"
+
+ def getMainMenu(self):
+ """
+ Returns the main menu
+
+ :returns: main menu
+ :rtype: json
+ """
+ self._log('getMainMenu', 1)
+ return MAIN_MENU
+
+ def showsList(self, url):
+ """
+ Generates a list of the TV Shows found at the provided url
+
+ :param url: The url
+ :type url: str
+
+ :returns: listitem items
+ :rtype: generator
+ """
+ self._log(f"showsList, url = {url}", 1)
+ items = self._loadJsonData(url)
+ if 'items' in items:
+ items['items'].extend([items.get('loadMore')] or [])
+ items = items['items']
+ else:
+ items = self._extractItemType(
+ items.get('children') or [],
+ 'MainContainer',
+ 'children')
+ items = self._extractItems(items)
+
+ for item in items:
+ if not item:
+ continue
+ if 'loadingTitle' in item:
+ # NEXT PAGE
+ yield {
+ 'label': T('load.more'),
+ 'params': {
+ 'mode': 'SHOWS',
+ 'url': self._createURL(item['url'])
+ },
+ }
+ else:
+ label = item['meta']['header']['title']
+ yield {
+ 'label': label,
+ 'params': {
+ 'mode': 'GENERIC',
+ 'url': self._createURL(item['url']),
+ 'name': label,
+ },
+ 'videoInfo': {
+ 'mediatype': 'tvshow',
+ 'title': label,
+ 'tvshowtitle': label,
+ },
+ 'arts': self._createInfoArt(item['media']['image']['url'], False),
+ }
+
+ def genericList(self, name, url):
+ """
+ Checks the url and chooses the appropriate method to parse the content.
+ Based on the PAGES_CRUMB it yields data from the corresponding loadXXX.
+
+ loadShows
+ loadCollections (same as Topic)
+ loadTopic
+
+ :param name: title of the url provided
+ :type name: str
+ :param url: url to process
+ :type url: str
+ """
+ self._log(f"genericList, name = {name}, url = {url}", 1)
+ try:
+ mtc = re.search(r'(/%s/)' % '/|/'.join(PAGES_CRUMB), url).group(1)
+ name_of_method = "load%s" % mtc.strip('/').capitalize()
+ method = getattr(self, name_of_method)
+ self._log(f"genericList, using method = {method}")
+ yield from method(name, url)
+ except Exception as e:
+ addonutils.notify(T('error.openurl'))
+ self._log(f"genericList, URL not supported: {url}", 3)
+ self._log(f"error: {e}", 3)
+ addonutils.endScript()
+
+ def loadShows(self, name, url, season=False):
+ self._log(f"loadShows, name = {name}, url = {url}, season = {season}", 1)
+ items = self._loadJsonData(url)
+ if not season:
+ items = self._extractItemType(
+ items.get('children') or [], 'MainContainer', 'children')
+ items = self._extractItemType(items, 'SeasonSelector', 'props')
+ # check if no season selector is present
+ # or season selector is empty
+ if len(items) == 0 or (
+ len(items[0]['items']) == 1 and not items[0]['items'][0].get('url')):
+ # and load directly the show
+ yield from self.loadShows(name, url, True)
+ return
+ else:
+ items = self._extractItemType(
+ items.get('children') or [], 'MainContainer', 'children')
+ items = self._extractItemType(items, 'LineList', 'props')
+ items = self._extractItemType(items, 'video-guide', 'filters')
+
+ items = items[0].get('items')
+ # check if there is only one item
+ if len(items) == 1:
+ # and load it directly
+ yield from self.loadItems(
+ name, self._createURL(items[0].get('url') or url))
+ else:
+ for item in items:
+ label = item['label']
+ yield {
+ 'label': label,
+ 'params': {
+ 'mode': 'EPISODES' if season else 'SEASON',
+ 'url': self._createURL(item.get('url') or url),
+ 'name': name,
+ },
+ 'videoInfo': {
+ 'mediatype': 'season' if re.search(r'season\s\d+', label, re.IGNORECASE) else 'video',
+ 'title': label,
+ 'tvshowtitle': name
+ },
+ 'arts': self._createInfoArt(),
+ }
+
+ def loadCollections(self, name, url):
+ """ Collections page are the same as topic pages (for now)"""
+ yield from self.loadTopic(name, url)
+ pass
+
+ def loadTopic(self, name, url):
+ """
+ Loads data from 'topic' pages.
+
+ :param name: Title of the page
+ :type name: str
+ :param url: The url
+ :type url: str
+ """
+ self._log(f"loadTopic, name = {name}, url = {url}")
+ items = self._loadJsonData(url)
+ items = self._extractItemType(
+ items.get('children') or [],
+ 'MainContainer',
+ 'children')
+ for item in self._extractItems(items) or []:
+ if not item:
+ continue
+ if 'loadingTitle' in item:
+ yield {
+ 'label': T('load.more'),
+ 'params': {
+ 'mode': 'EPISODES',
+ 'url': self._createURL(item['url']),
+ 'name': name,
+ },
+ 'arts': self._createInfoArt(),
+ }
+
+ # skip non necessary elements, like ADS and others
+ if item.get('cardType') not in ['series', 'episode', 'promo']:
+ continue
+
+ # skip 'promo' items in Digital Original listing
+ # as they are duplicates of something already in the list
+ if name == T('digital.original') and item.get('cardType') == 'promo':
+ continue
+
+ if not item.get('url') or not item.get('title'):
+ continue
+
+ label = item['title']
+ # playable is determined by the url not being in the parsable pages
+ playable = not any((f"/{x}/") in item['url'] for x in PAGES_CRUMB)
+ media = item.get('media') or {}
+ image = media.get('image') or {}
+ infos = {
+ 'label': label,
+ 'params': {
+ 'mode': 'PLAY' if playable else 'GENERIC',
+ 'url': self._createURL(item['url'], fix=playable),
+ 'name': label,
+ },
+ 'videoInfo': {
+ 'mediatype': 'video' if playable else 'tvshow',
+ 'title': label,
+ 'tvshowtitle': item['meta']['label'],
+ 'duration': self._getDuration(media.get('duration')),
+ },
+ 'arts': self._createInfoArt(image.get('url')),
+ 'playable': playable,
+ }
+
+ if playable:
+ self.cache.set(
+ f"{addonutils.ID}_videoInfo[{infos['params']['url']}]",
+ [infos['videoInfo'], infos['arts']],
+ expiration=datetime.timedelta(hours=2),
+ json_data=True)
+ yield infos
+
+ def loadItems(self, name, url):
+ """
+ Generate a list of playable items from the provided url
+
+ :param name: The name
+ :type name: str
+ :param url: The url
+ :type url: str
+
+ :returns: items
+ :rtype: list
+ """
+ self._log(f"loadItems, name = {name}, url = {url}")
+ items = self._loadJsonData(url, hours=1)
+ for item in items.get('items') or []:
+ if item.get('cardType') == 'ad':
+ continue
+ meta = item.get('meta')
+ try:
+ sub = meta.get('subHeader')
+ if isinstance(meta['header']['title'], str):
+ label = meta['header']['title']
+ else:
+ label = meta['header']['title'].get('text') or ''
+ label = f"{label} - {sub}" if sub else label
+ except:
+ label = 'NO TITLE'
+ try:
+ season, episode = re.search(
+ r'season\s*(\d+)\s*episode\s*(\d+)\s*',
+ meta.get('itemAriaLabel') or meta.get('ariaLabel'),
+ re.IGNORECASE).groups()
+ except:
+ season, episode = None, None
+
+ tvshowtitle = name or meta.get('label')
+ media = item.get('media') or {}
+ image = media.get('image') or {}
+ infos = {
+ 'label': label,
+ 'params': {
+ 'mode': 'PLAY',
+ 'url': self._createURL(item['url']),
+ 'name': sub or label,
+ 'mgid': item.get('mgid') or item.get('id'),
+ },
+ 'videoInfo': {
+ 'mediatype': 'episode' if episode else 'video',
+ 'title': sub or label,
+ 'tvshowtitle': tvshowtitle,
+ 'plot': meta.get('description'),
+ 'season': season,
+ 'episode': episode,
+ 'duration': self._getDuration(media.get('duration')),
+ 'aired': self._getDate(meta.get('date')),
+ },
+ 'arts': self._createInfoArt(image.get('url')),
+ 'playable': True,
+ }
+ self.cache.set(
+ f"{addonutils.ID}_videoInfo[{infos['params']['url']}]",
+ [infos['videoInfo'], infos['arts']],
+ expiration=datetime.timedelta(hours=2),
+ json_data=True)
+ yield infos
+
+ if items.get('loadMore'):
+ yield {
+ 'label': T('load.more'),
+ 'params': {
+ 'mode': 'EPISODES',
+ # replace necessary to urlencode only ":"
+ 'url': self._createURL(items['loadMore']['url'].replace(':', '%3A')),
+ 'name': name,
+ },
+ 'arts': self._createInfoArt(),
+ }
+
+ def getMediaUrl(self, name, url, mgid=None, select_quality=False):
+ """
+ Retrieve media urls with yt-dlp for the provided url or mgid
+
+ :param name: Title
+ :type name: str
+ :param url: The url
+ :type url: str
+ :param mgid: The mgid
+ :type mgid: str
+
+ :returns: playable urls
+ :rtype: generator
+ """
+ from resources.lib import yt_dlp
+
+ self._log(f"getMediaUrl, url = {url}, mgid = {mgid}")
+ self._log(f"yt-dlp version: {yt_dlp.version.__version__}")
+ if mgid and not mgid.startswith('mgid'):
+ mgid = f"{BASE_MGID}{mgid}"
+
+ ytInfo = self.cache.get(
+ f"{addonutils.ID}_ytInfo[{mgid or url}]")
+ videoInfo = self.cache.get(
+ f"{addonutils.ID}_videoInfo[{url}]", json_data=True
+ ) or [{},{}]
+
+ if not ytInfo:
+ try:
+ ytInfo = yt_dlp.YoutubeDL().extract_info(mgid or url)
+ self.cache.set(
+ f"{addonutils.ID}_ytInfo[{mgid or url}]", ytInfo,
+ expiration=datetime.timedelta(hours=2))
+ except:
+ ytInfo = None
+
+ if ytInfo is None:
+ addonutils.notify(T('error.no.video'))
+ self._log('getMediaUrl, ydl.extract_info=None', 3)
+ addonutils.endScript(exit=False)
+ if ytInfo.get('_type') != 'playlist':
+ addonutils.notify(T('error.wrong.type'))
+ self._log(f"getPlayItems, info type <{ytInfo['_type']}> not supported", 3)
+ addonutils.endScript(exit=False)
+
+ for video in ytInfo.get('entries') or []:
+ vidIDX = video.get('playlist_index') or video.get('playlist_autonumber')
+ label = f"{name} - Act {vidIDX}" if video.get('n_entries') > 1 else name
+ subs = None
+ try:
+ if 'subtitles' in video:
+ subs = [x['url'] for x in video['subtitles'].get('en', '')
+ if 'url' in x and x['ext'] == 'vtt']
+ except:
+ pass
+
+ videoInfo[0].update({
+ 'title': label,
+ 'duration': video.get('duration'),
+ })
+ if video.get('thumbnail'):
+ videoInfo[1].update(self._createInfoArt(video['thumbnail']))
+
+ infos = {
+ 'idx': vidIDX-1,
+ 'url': video.get('url'),
+ 'label': label,
+ 'videoInfo': videoInfo[0],
+ 'arts': videoInfo[1],
+ 'subs': subs,
+ }
+
+ if select_quality:
+ max_height = QUALITIES[QUALITY]
+ for i in range(len(video.get('formats'))-1, 0, -1):
+ if video['formats'][i].get('height') <= max_height:
+ self._log(f"getPlaylistContent, quality_found = {video['formats'][i].get('format_id')}")
+ infos['url'] = video['formats'][i].get('url')
+ break
+ yield infos
diff --git a/plugin.video.cc.com/resources/lib/main.py b/plugin.video.cc.com/resources/lib/main.py
new file mode 100644
index 0000000000..eace814a52
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/main.py
@@ -0,0 +1,83 @@
+from resources.lib import addonutils
+from resources.lib.comedycentral import CC
+
+
+class ComedyCentral(object):
+
+ def __init__(self):
+ self.cc = CC()
+ self._ISA = addonutils.getSettingAsBool('UseInputStream')
+ self._FISA = addonutils.getSettingAsBool('ForceInputstream')
+
+ def addItems(self, items):
+ media_type = []
+ for item in items or []:
+ if item.get('videoInfo'):
+ media_type.append(item['videoInfo'].get('mediatype'))
+ addonutils.addListItem(
+ label=item.get('label'),
+ label2=item.get('label2'),
+ params=item.get('params'),
+ arts=item.get('arts'),
+ videoInfo=item.get('videoInfo'),
+ isFolder=False if item.get('playable') else True,
+ )
+
+ media_type = list(set(media_type))
+ if len(media_type) == 1:
+ addonutils.setContent(f"{media_type[0]}s")
+
+
+ def main(self):
+ params = addonutils.getParams()
+ if 'mode' in params:
+ if params['mode'] == 'SHOWS':
+ shows = self.cc.showsList(params['url'])
+ self.addItems(shows)
+ addonutils.setContent('tvshows')
+
+ elif params['mode'] == 'GENERIC':
+ generic = self.cc.genericList(params.get('name'), params['url'])
+ self.addItems(generic)
+
+ elif params['mode'] == 'SEASON':
+ show = self.cc.loadShows(params.get('name'), params['url'], True)
+ self.addItems(show)
+
+ elif params['mode'] == 'EPISODES':
+ episodes = self.cc.loadItems(params.get('name'), params['url'])
+ self.addItems(episodes)
+ addonutils.setContent('episodes')
+
+ elif params['mode'] == 'PLAY':
+ select_quality = not self._ISA or (self._ISA and self._FISA)
+ playItems = self.cc.getMediaUrl(
+ params['name'], params['url'],
+ params.get('mgid'), select_quality)
+ plst = addonutils.getPlaylist()
+
+ for item in playItems:
+ vidIDX = item['idx']
+ liz = addonutils.createListItem(
+ label=item['label'], path=item['url'],
+ videoInfo=item['videoInfo'], subs=item.get('subs'),
+ arts=item.get('arts'), isFolder=False)
+ if self._ISA:
+ import inputstreamhelper
+ is_helper = inputstreamhelper.Helper('hls')
+ if is_helper.check_inputstream():
+ liz.setContentLookup(False)
+ liz.setMimeType('application/vnd.apple.mpegurl')
+ liz.setProperty('inputstream', is_helper.inputstream_addon)
+ liz.setProperty('inputstream.adaptive.manifest_type', 'hls')
+ if vidIDX == 0:
+ addonutils.setResolvedUrl(item=liz, exit=False)
+ plst.add(item['url'], liz, vidIDX)
+ plst.unshuffle()
+
+ else:
+ menu = self.cc.getMainMenu()
+ self.addItems(menu)
+
+ self.cc = None
+ addonutils.endScript(exit=False)
diff --git a/plugin.video.cc.com/resources/lib/translate.py b/plugin.video.cc.com/resources/lib/translate.py
new file mode 100644
index 0000000000..499e49756d
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/translate.py
@@ -0,0 +1,23 @@
+from resources.lib.addonutils import LANGUAGE
+from resources.lib.addonutils import log
+
+T_MAP = {
+ 'shows': 32001,
+ 'full.episodes': 32002,
+ 'standup': 32003,
+ 'digital.original': 32004,
+ 'load.more': 32005,
+ 'error.openurl': 33001,
+ 'error.no.json': 33002,
+ 'error.no.video': 33003,
+ 'error.generic': 33004,
+ 'error.wrong.type': 33005,
+}
+
+
+def translatedString(id):
+ t_string = T_MAP.get(id)
+ if t_string:
+ return LANGUAGE(t_string)
+ log(f"{id} translation ID not found.", 3)
+ return 'NO TRANSLATION AVAILABLE'
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/LICENSE b/plugin.video.cc.com/resources/lib/yt_dlp/LICENSE
new file mode 100644
index 0000000000..68a49daad8
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/LICENSE
@@ -0,0 +1,24 @@
+This is free and unencumbered software released into the public domain.
+
+Anyone is free to copy, modify, publish, use, compile, sell, or
+distribute this software, either in source code form or as a compiled
+binary, for any purpose, commercial or non-commercial, and by any
+means.
+
+In jurisdictions that recognize copyright laws, the author or authors
+of this software dedicate any and all copyright interest in the
+software to the public domain. We make this dedication for the benefit
+of the public at large and to the detriment of our heirs and
+successors. We intend this dedication to be an overt act of
+relinquishment in perpetuity of all present and future rights to this
+software under copyright law.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+OTHER DEALINGS IN THE SOFTWARE.
+
+For more information, please refer to
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/YoutubeDL.py b/plugin.video.cc.com/resources/lib/yt_dlp/YoutubeDL.py
new file mode 100644
index 0000000000..3c176597b0
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/YoutubeDL.py
@@ -0,0 +1,1647 @@
+#!/usr/bin/env python3
+# coding: utf-8
+
+from __future__ import absolute_import, unicode_literals
+
+import collections
+import copy
+import datetime
+import errno
+import functools
+import io
+import itertools
+import json
+import os
+import re
+import sys
+import tempfile
+import time
+import tokenize
+import traceback
+import random
+
+
+from .compat import (
+ compat_basestring,
+ compat_numeric_types,
+ compat_str,
+ compat_tokenize_tokenize,
+ compat_urllib_error,
+ compat_urllib_request,
+ compat_urllib_request_DataHandler,
+)
+from .utils import (
+ args_to_str,
+ determine_ext,
+ DownloadError,
+ encode_compat_str,
+ EntryNotInPlaylist,
+ error_to_compat_str,
+ ExistingVideoReached,
+ ExtractorError,
+ format_field,
+ GeoRestrictedError,
+ HEADRequest,
+ int_or_none,
+ LazyList,
+ locked_file,
+ make_HTTPS_handler,
+ MaxDownloadsReached,
+ network_exceptions,
+ orderedSet,
+ PagedList,
+ PerRequestProxyHandler,
+ PostProcessingError,
+ register_socks_protocols,
+ RejectedVideoReached,
+ replace_extension,
+ SameFileError,
+ sanitize_filename,
+ sanitize_url,
+ sanitized_Request,
+ std_headers,
+ STR_FORMAT_RE_TMPL,
+ STR_FORMAT_TYPES,
+ str_or_none,
+ strftime_or_none,
+ supports_terminal_sequences,
+ TERMINAL_SEQUENCES,
+ ThrottledDownload,
+ traverse_obj,
+ try_get,
+ UnavailableVideoError,
+ url_basename,
+ variadic,
+ write_string,
+ YoutubeDLCookieProcessor,
+ YoutubeDLHandler,
+ YoutubeDLRedirectHandler,
+)
+from .extractor import (
+ gen_extractor_classes,
+ get_info_extractor,
+)
+
+
+class YoutubeDL(object):
+ _NUMERIC_FIELDS = set((
+ 'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
+ 'timestamp', 'release_timestamp',
+ 'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
+ 'average_rating', 'comment_count', 'age_limit',
+ 'start_time', 'end_time',
+ 'chapter_number', 'season_number', 'episode_number',
+ 'track_number', 'disc_number', 'release_year',
+ ))
+
+ _format_selection_exts = {
+ 'audio': {'m4a', 'mp3', 'ogg', 'aac'},
+ 'video': {'mp4', 'flv', 'webm', '3gp'},
+ 'storyboards': {'mhtml'},
+ }
+
+ params = None
+ _ies = {}
+ _pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
+ _printed_messages = set()
+ _first_webpage_request = True
+ _download_retcode = None
+ _num_downloads = None
+ _playlist_level = 0
+ _playlist_urls = set()
+ _screen_file = None
+
+ def __init__(self, params=None, auto_init=True):
+ """Create a FileDownloader object with the given options.
+ @param auto_init Whether to load the default extractors and print header (if verbose).
+ Set to 'no_verbose_header' to not print the header
+ """
+ if params is None:
+ params = {}
+ self._ies = {}
+ self._ies_instances = {}
+ self._pps = {'pre_process': [], 'before_dl': [], 'after_move': [], 'post_process': []}
+ self._printed_messages = set()
+ self._first_webpage_request = True
+ self._post_hooks = []
+ self._progress_hooks = []
+ self._postprocessor_hooks = []
+ self._download_retcode = 0
+ self._num_downloads = 0
+ self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
+ self._err_file = sys.stderr
+ self.params = params
+
+ if sys.version_info < (3, 6):
+ self.report_warning(
+ 'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])
+
+ for msg in self.params.get('warnings', []):
+ self.report_warning(msg)
+
+ if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
+ # nooverwrites was unnecessarily changed to overwrites
+ # in 0c3d0f51778b153f65c21906031c2e091fcfb641
+ # This ensures compatibility with both keys
+ self.params['overwrites'] = not self.params['nooverwrites']
+ elif self.params.get('overwrites') is None:
+ self.params.pop('overwrites', None)
+ else:
+ self.params['nooverwrites'] = not self.params['overwrites']
+
+ if (sys.platform != 'win32'
+ and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
+ and not params.get('restrictfilenames', False)):
+ # Unicode filesystem API will throw errors (#1474, #13027)
+ self.report_warning(
+ 'Assuming --restrict-filenames since file system encoding '
+ 'cannot encode all characters. '
+ 'Set the LC_ALL environment variable to fix this.')
+ self.params['restrictfilenames'] = True
+
+ self.format_selector = None
+
+ self._setup_opener()
+
+ if auto_init:
+ self.add_default_info_extractors()
+
+ for ph in self.params.get('post_hooks', []):
+ self.add_post_hook(ph)
+
+ for ph in self.params.get('progress_hooks', []):
+ self.add_progress_hook(ph)
+
+ register_socks_protocols()
+
+ def preload_download_archive(fn):
+ """Preload the archive, if any is specified"""
+ if fn is None:
+ return False
+ self.write_debug('Loading archive file %r\n' % fn)
+ try:
+ with locked_file(fn, 'r', encoding='utf-8') as archive_file:
+ for line in archive_file:
+ self.archive.add(line.strip())
+ except IOError as ioe:
+ if ioe.errno != errno.ENOENT:
+ raise
+ return False
+ return True
+
+ self.archive = set()
+ preload_download_archive(self.params.get('download_archive'))
+
+ def warn_if_short_id(self, argv):
+ # short YouTube ID starting with dash?
+ idxs = [
+ i for i, a in enumerate(argv)
+ if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
+ if idxs:
+ correct_argv = (
+ ['yt-dlp']
+ + [a for i, a in enumerate(argv) if i not in idxs]
+ + ['--'] + [argv[i] for i in idxs]
+ )
+ self.report_warning(
+ 'Long argument string detected. '
+ 'Use -- to separate parameters and URLs, like this:\n%s\n' %
+ args_to_str(correct_argv))
+
+ def add_info_extractor(self, ie):
+ """Add an InfoExtractor object to the end of the list."""
+ ie_key = ie.ie_key()
+ self._ies[ie_key] = ie
+ if not isinstance(ie, type):
+ self._ies_instances[ie_key] = ie
+ ie.set_downloader(self)
+
+ def _get_info_extractor_class(self, ie_key):
+ ie = self._ies.get(ie_key)
+ if ie is None:
+ ie = get_info_extractor(ie_key)
+ self.add_info_extractor(ie)
+ return ie
+
+ def get_info_extractor(self, ie_key):
+ """
+ Get an instance of an IE with name ie_key, it will try to get one from
+ the _ies list, if there's no instance it will create a new one and add
+ it to the extractor list.
+ """
+ ie = self._ies_instances.get(ie_key)
+ if ie is None:
+ ie = get_info_extractor(ie_key)()
+ self.add_info_extractor(ie)
+ return ie
+
+ def add_default_info_extractors(self):
+ """
+ Add the InfoExtractors returned by gen_extractors to the end of the list
+ """
+ for ie in gen_extractor_classes():
+ self.add_info_extractor(ie)
+
+ def add_post_hook(self, ph):
+ """Add the post hook"""
+ self._post_hooks.append(ph)
+
+ def add_progress_hook(self, ph):
+ """Add the download progress hook"""
+ self._progress_hooks.append(ph)
+
+ def add_postprocessor_hook(self, ph):
+ """Add the postprocessing progress hook"""
+ self._postprocessor_hooks.append(ph)
+
+ def _bidi_workaround(self, message):
+ if not hasattr(self, '_output_channel'):
+ return message
+
+ assert hasattr(self, '_output_process')
+ assert isinstance(message, compat_str)
+ line_count = message.count('\n') + 1
+ self._output_process.stdin.write((message + '\n').encode('utf-8'))
+ self._output_process.stdin.flush()
+ res = ''.join(self._output_channel.readline().decode('utf-8')
+ for _ in range(line_count))
+ return res[:-len('\n')]
+
+ def _write_string(self, message, out=None, only_once=False):
+ if only_once:
+ if message in self._printed_messages:
+ return
+ self._printed_messages.add(message)
+ write_string(message, out=out, encoding=self.params.get('encoding'))
+
+ def to_stdout(self, message, skip_eol=False, quiet=False):
+ """Print message to stdout"""
+ if self.params.get('logger'):
+ self.params['logger'].debug(message)
+ elif not quiet or self.params.get('verbose'):
+ self._write_string(
+ '%s%s' % (self._bidi_workaround(message), ('' if skip_eol else '\n')),
+ self._err_file if quiet else self._screen_file)
+
+ def to_stderr(self, message, only_once=False):
+ """Print message to stderr"""
+ assert isinstance(message, compat_str)
+ if self.params.get('logger'):
+ self.params['logger'].error(message)
+ else:
+ self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+
+ if self.params.get('cookiefile') is not None:
+ self.cookiejar.save(ignore_discard=True, ignore_expires=True)
+
+ def trouble(self, message=None, tb=None):
+ """Determine action to take when a download problem appears.
+
+ Depending on if the downloader has been configured to ignore
+ download errors or not, this method may throw an exception or
+ not when errors are found, after printing the message.
+
+ tb, if given, is additional traceback information.
+ """
+ if message is not None:
+ self.to_stderr(message)
+ if self.params.get('verbose'):
+ if tb is None:
+ if sys.exc_info()[0]: # if .trouble has been called from an except block
+ tb = ''
+ if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+ tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
+ tb += encode_compat_str(traceback.format_exc())
+ else:
+ tb_data = traceback.format_list(traceback.extract_stack())
+ tb = ''.join(tb_data)
+ if tb:
+ self.to_stderr(tb)
+ if not self.params.get('ignoreerrors'):
+ if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
+ exc_info = sys.exc_info()[1].exc_info
+ else:
+ exc_info = sys.exc_info()
+ raise DownloadError(message, exc_info)
+ self._download_retcode = 1
+
+ def to_screen(self, message, skip_eol=False):
+ """Print message to stdout if not in quiet mode"""
+ self.to_stdout(
+ message, skip_eol, quiet=self.params.get('quiet', False))
+
+ def report_warning(self, message, only_once=False):
+ '''
+ Print the message to stderr, it will be prefixed with 'WARNING:'
+ If stderr is a tty file the 'WARNING:' will be colored
+ '''
+ if self.params.get('logger') is not None:
+ self.params['logger'].warning(message)
+ else:
+ if self.params.get('no_warnings'):
+ return
+ self.to_stderr(f'{"WARNING:"} {message}', only_once)
+
+ def report_error(self, message, tb=None):
+ '''
+ Do the same as trouble, but prefixes the message with 'ERROR:', colored
+ in red if stderr is a tty file.
+ '''
+ self.trouble(f'{"ERROR:"} {message}', tb)
+
+ def write_debug(self, message, only_once=False):
+ '''Log debug message or Print message to stderr'''
+ if not self.params.get('verbose', False):
+ return
+ message = '[debug] %s' % message
+ if self.params.get('logger'):
+ self.params['logger'].debug(message)
+ else:
+ self.to_stderr(message, only_once)
+
+ def raise_no_formats(self, info, forced=False):
+ has_drm = info.get('__has_drm')
+ msg = 'This video is DRM protected' if has_drm else 'No video formats found!'
+ expected = self.params.get('ignore_no_formats_error')
+ if forced or not expected:
+ raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
+ expected=has_drm or expected)
+ else:
+ self.report_warning(msg)
+
+ def _match_entry(self, info_dict, incomplete=False, silent=False):
+ """ Returns None if the file should be downloaded """
+
+ video_title = info_dict.get('title', info_dict.get('id', 'video'))
+
+ def check_filter():
+ if 'title' in info_dict:
+ # This can happen when we're just evaluating the playlist
+ title = info_dict['title']
+ matchtitle = self.params.get('matchtitle', False)
+ if matchtitle:
+ if not re.search(matchtitle, title, re.IGNORECASE):
+ return '"' + title + '" title did not match pattern "' + matchtitle + '"'
+ rejecttitle = self.params.get('rejecttitle', False)
+ if rejecttitle:
+ if re.search(rejecttitle, title, re.IGNORECASE):
+ return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
+ view_count = info_dict.get('view_count')
+ if view_count is not None:
+ min_views = self.params.get('min_views')
+ if min_views is not None and view_count < min_views:
+ return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
+ max_views = self.params.get('max_views')
+ if max_views is not None and view_count > max_views:
+ return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
+
+ match_filter = self.params.get('match_filter')
+ if match_filter is not None:
+ try:
+ ret = match_filter(info_dict, incomplete=incomplete)
+ except TypeError:
+ # For backward compatibility
+ ret = None if incomplete else match_filter(info_dict)
+ if ret is not None:
+ return ret
+ return None
+
+ reason = check_filter()
+ break_opt, break_err = 'break_on_reject', RejectedVideoReached
+ if reason is not None:
+ if not silent:
+ self.to_screen('[download] ' + reason)
+ if self.params.get(break_opt, False):
+ raise break_err()
+ return reason
+
+ @staticmethod
+ def add_extra_info(info_dict, extra_info):
+ '''Set the keys from extra_info in info dict if they are missing'''
+ for key, value in extra_info.items():
+ info_dict.setdefault(key, value)
+
+ def extract_info(self, url, download=True, ie_key=None, extra_info=None,
+ process=True, force_generic_extractor=False):
+ """
+ Return a list with a dictionary for each video extracted.
+
+ Arguments:
+ url -- URL to extract
+
+ Keyword arguments:
+ download -- whether to download videos during extraction
+ ie_key -- extractor key hint
+ extra_info -- dictionary containing the extra values to add to each result
+ process -- whether to resolve all unresolved references (URLs, playlist items),
+ must be True for download to work.
+ force_generic_extractor -- force using the generic extractor
+ """
+
+ if extra_info is None:
+ extra_info = {}
+
+ if not ie_key and force_generic_extractor:
+ ie_key = 'Generic'
+
+ if ie_key:
+ ies = {ie_key: self._get_info_extractor_class(ie_key)}
+ else:
+ ies = self._ies
+
+ for ie_key, ie in ies.items():
+ if not ie.suitable(url):
+ continue
+
+ if not ie.working():
+ self.report_warning('The program functionality for this site has been marked as broken, '
+ 'and will probably not work.')
+
+ return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
+ else:
+ self.report_error('no suitable InfoExtractor for URL %s' % url)
+
+ def __handle_extraction_exceptions(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ try:
+ return func(self, *args, **kwargs)
+ except GeoRestrictedError as e:
+ msg = e.msg
+ msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
+ self.report_error(msg)
+ except ExtractorError as e: # An error we somewhat expected
+ self.report_error(compat_str(e), e.format_traceback())
+ except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached, LazyList.IndexError):
+ raise
+ except Exception as e:
+ raise e
+ return wrapper
+
+ @__handle_extraction_exceptions
+ def __extract_info(self, url, ie, download, extra_info, process):
+ ie_result = ie.extract(url)
+ if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
+ return
+ if isinstance(ie_result, list):
+ # Backwards compatibility: old IE result format
+ ie_result = {
+ '_type': 'compat_list',
+ 'entries': ie_result,
+ }
+ if extra_info.get('original_url'):
+ ie_result.setdefault('original_url', extra_info['original_url'])
+ self.add_default_extra_info(ie_result, ie, url)
+ if process:
+ return self.process_ie_result(ie_result, download, extra_info)
+ else:
+ return ie_result
+
+ def add_default_extra_info(self, ie_result, ie, url):
+ if url is not None:
+ self.add_extra_info(ie_result, {
+ 'webpage_url': url,
+ 'original_url': url,
+ 'webpage_url_basename': url_basename(url),
+ })
+ if ie is not None:
+ self.add_extra_info(ie_result, {
+ 'extractor': ie.IE_NAME,
+ 'extractor_key': ie.ie_key(),
+ })
+
+ def process_ie_result(self, ie_result, download=True, extra_info=None):
+ """
+ Take the result of the ie(may be modified) and resolve all unresolved
+ references (URLs, playlist items).
+
+ It will also download the videos if 'download'.
+ Returns the resolved ie_result.
+ """
+ if extra_info is None:
+ extra_info = {}
+ result_type = ie_result.get('_type', 'video')
+
+ if result_type in ('url', 'url_transparent'):
+ ie_result['url'] = sanitize_url(ie_result['url'])
+ if ie_result.get('original_url'):
+ extra_info.setdefault('original_url', ie_result['original_url'])
+
+ extract_flat = self.params.get('extract_flat', False)
+ if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
+ or extract_flat is True):
+ info_copy = ie_result.copy()
+ ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
+ if ie and not ie_result.get('id'):
+ info_copy['id'] = ie.get_temp_id(ie_result['url'])
+ self.add_default_extra_info(info_copy, ie, ie_result['url'])
+ self.add_extra_info(info_copy, extra_info)
+ self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
+ if self.params.get('force_write_download_archive', False):
+ self.record_download_archive(info_copy)
+ return ie_result
+
+ if result_type == 'video':
+ self.add_extra_info(ie_result, extra_info)
+ ie_result = self.process_video_result(ie_result, download=download)
+ additional_urls = (ie_result or {}).get('additional_urls')
+ if additional_urls:
+ # TODO: Improve MetadataParserPP to allow setting a list
+ if isinstance(additional_urls, compat_str):
+ additional_urls = [additional_urls]
+ self.to_screen(
+ '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
+ self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
+ ie_result['additional_entries'] = [
+ self.extract_info(
+ url, download, extra_info,
+ force_generic_extractor=self.params.get('force_generic_extractor'))
+ for url in additional_urls
+ ]
+ return ie_result
+ elif result_type == 'url':
+ # We have to add extra_info to the results because it may be
+ # contained in a playlist
+ return self.extract_info(
+ ie_result['url'], download,
+ ie_key=ie_result.get('ie_key'),
+ extra_info=extra_info)
+ elif result_type == 'url_transparent':
+ # Use the information from the embedding page
+ info = self.extract_info(
+ ie_result['url'], ie_key=ie_result.get('ie_key'),
+ extra_info=extra_info, download=False, process=False)
+
+ # extract_info may return None when ignoreerrors is enabled and
+ # extraction failed with an error, don't crash and return early
+ # in this case
+ if not info:
+ return info
+
+ force_properties = dict(
+ (k, v) for k, v in ie_result.items() if v is not None)
+ for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
+ if f in force_properties:
+ del force_properties[f]
+ new_result = info.copy()
+ new_result.update(force_properties)
+
+ # Extracted info may not be a video result (i.e.
+ # info.get('_type', 'video') != video) but rather an url or
+ # url_transparent. In such cases outer metadata (from ie_result)
+ # should be propagated to inner one (info). For this to happen
+ # _type of info should be overridden with url_transparent. This
+ # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
+ if new_result.get('_type') == 'url':
+ new_result['_type'] = 'url_transparent'
+
+ return self.process_ie_result(
+ new_result, download=download, extra_info=extra_info)
+ elif result_type in ('playlist', 'multi_video'):
+ # Protect from infinite recursion due to recursively nested playlists
+ # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
+ webpage_url = ie_result['webpage_url']
+ if webpage_url in self._playlist_urls:
+ self.to_screen(
+ '[download] Skipping already downloaded playlist: %s'
+ % ie_result.get('title') or ie_result.get('id'))
+ return
+
+ self._playlist_level += 1
+ self._playlist_urls.add(webpage_url)
+ self._sanitize_thumbnails(ie_result)
+ try:
+ return self.__process_playlist(ie_result, download)
+ finally:
+ self._playlist_level -= 1
+ if not self._playlist_level:
+ self._playlist_urls.clear()
+ elif result_type == 'compat_list':
+ self.report_warning(
+ 'Extractor %s returned a compat_list result. '
+ 'It needs to be updated.' % ie_result.get('extractor'))
+
+ def _fixup(r):
+ self.add_extra_info(r, {
+ 'extractor': ie_result['extractor'],
+ 'webpage_url': ie_result['webpage_url'],
+ 'webpage_url_basename': url_basename(ie_result['webpage_url']),
+ 'extractor_key': ie_result['extractor_key'],
+ })
+ return r
+ ie_result['entries'] = [
+ self.process_ie_result(_fixup(r), download, extra_info)
+ for r in ie_result['entries']
+ ]
+ return ie_result
+ else:
+ raise Exception('Invalid result type: %s' % result_type)
+
+ def __process_playlist(self, ie_result, download):
+ # We process each entry in the playlist
+ playlist = ie_result.get('title') or ie_result.get('id')
+ self.to_screen('[download] Downloading playlist: %s' % playlist)
+
+ if 'entries' not in ie_result:
+ raise EntryNotInPlaylist('There are no entries')
+ incomplete_entries = bool(ie_result.get('requested_entries'))
+ if incomplete_entries:
+ def fill_missing_entries(entries, indexes):
+ ret = [None] * max(*indexes)
+ for i, entry in zip(indexes, entries):
+ ret[i - 1] = entry
+ return ret
+ ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])
+
+ playlist_results = []
+
+ playliststart = self.params.get('playliststart', 1)
+ playlistend = self.params.get('playlistend')
+ # For backwards compatibility, interpret -1 as whole list
+ if playlistend == -1:
+ playlistend = None
+
+ playlistitems_str = self.params.get('playlist_items')
+ playlistitems = None
+ if playlistitems_str is not None:
+ def iter_playlistitems(format):
+ for string_segment in format.split(','):
+ if '-' in string_segment:
+ start, end = string_segment.split('-')
+ for item in range(int(start), int(end) + 1):
+ yield int(item)
+ else:
+ yield int(string_segment)
+ playlistitems = orderedSet(iter_playlistitems(playlistitems_str))
+
+ ie_entries = ie_result['entries']
+ msg = (
+ 'Downloading %d videos' if not isinstance(ie_entries, list)
+ else 'Collected %d videos; downloading %%d of them' % len(ie_entries))
+
+ if isinstance(ie_entries, list):
+ def get_entry(i):
+ return ie_entries[i - 1]
+ else:
+ if not isinstance(ie_entries, (PagedList, LazyList)):
+ ie_entries = LazyList(ie_entries)
+
+ def get_entry(i):
+ return YoutubeDL.__handle_extraction_exceptions(
+ lambda self, i: ie_entries[i - 1]
+ )(self, i)
+
+ entries = []
+ items = playlistitems if playlistitems is not None else itertools.count(playliststart)
+ for i in items:
+ if i == 0:
+ continue
+ if playlistitems is None and playlistend is not None and playlistend < i:
+ break
+ entry = None
+ try:
+ entry = get_entry(i)
+ if entry is None:
+ raise EntryNotInPlaylist()
+ except (IndexError, EntryNotInPlaylist):
+ if incomplete_entries:
+ raise EntryNotInPlaylist()
+ elif not playlistitems:
+ break
+ entries.append(entry)
+ try:
+ if entry is not None:
+ self._match_entry(entry, incomplete=True, silent=True)
+ except (ExistingVideoReached, RejectedVideoReached):
+ break
+ ie_result['entries'] = entries
+
+ # Save playlist_index before re-ordering
+ entries = [
+ ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
+ for i, entry in enumerate(entries, 1)
+ if entry is not None]
+ n_entries = len(entries)
+
+ if not playlistitems and (playliststart or playlistend):
+ playlistitems = list(range(playliststart, playliststart + n_entries))
+ ie_result['requested_entries'] = playlistitems
+
+ if self.params.get('allow_playlist_files', True):
+ ie_copy = {
+ 'playlist': playlist,
+ 'playlist_id': ie_result.get('id'),
+ 'playlist_title': ie_result.get('title'),
+ 'playlist_uploader': ie_result.get('uploader'),
+ 'playlist_uploader_id': ie_result.get('uploader_id'),
+ 'playlist_index': 0,
+ }
+ ie_copy.update(dict(ie_result))
+
+ if self.params.get('playlistreverse', False):
+ entries = entries[::-1]
+ if self.params.get('playlistrandom', False):
+ random.shuffle(entries)
+
+ x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
+
+ self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
+ failures = 0
+ max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
+ for i, entry_tuple in enumerate(entries, 1):
+ playlist_index, entry = entry_tuple
+ if 'playlist-index' in self.params.get('compat_opts', []):
+ playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
+ self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
+ # This __x_forwarded_for_ip thing is a bit ugly but requires
+ # minimal changes
+ if x_forwarded_for:
+ entry['__x_forwarded_for_ip'] = x_forwarded_for
+ extra = {
+ 'n_entries': n_entries,
+ '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
+ 'playlist_index': playlist_index,
+ 'playlist_autonumber': i,
+ 'playlist': playlist,
+ 'playlist_id': ie_result.get('id'),
+ 'playlist_title': ie_result.get('title'),
+ 'playlist_uploader': ie_result.get('uploader'),
+ 'playlist_uploader_id': ie_result.get('uploader_id'),
+ 'extractor': ie_result['extractor'],
+ 'webpage_url': ie_result['webpage_url'],
+ 'webpage_url_basename': url_basename(ie_result['webpage_url']),
+ 'extractor_key': ie_result['extractor_key'],
+ }
+
+ if self._match_entry(entry, incomplete=True) is not None:
+ continue
+
+ entry_result = self.__process_iterable_entry(entry, download, extra)
+ if not entry_result:
+ failures += 1
+ if failures >= max_failures:
+ self.report_error(
+ 'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
+ break
+ # TODO: skip failed (empty) entries?
+ playlist_results.append(entry_result)
+ ie_result['entries'] = playlist_results
+ self.to_screen('[download] Finished downloading playlist: %s' % playlist)
+ return ie_result
+
+ @__handle_extraction_exceptions
+ def __process_iterable_entry(self, entry, download, extra_info):
+ return self.process_ie_result(
+ entry, download=download, extra_info=extra_info)
+
+ def _default_format_spec(self, info_dict, download=True):
+
+ prefer_best = (
+ not self.params.get('simulate')
+ and download
+ and info_dict.get('is_live', False))
+ compat = (
+ prefer_best
+ or self.params.get('allow_multiple_audio_streams', False)
+ or 'format-spec' in self.params.get('compat_opts', []))
+
+ return (
+ 'best/bestvideo+bestaudio' if prefer_best
+ else 'bestvideo*+bestaudio/best' if not compat
+ else 'bestvideo+bestaudio/best')
+
+ def build_format_selector(self, format_spec):
+ def syntax_error(note, start):
+ message = (
+ 'Invalid format specification: '
+ '{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
+ return SyntaxError(message)
+
+ PICKFIRST = 'PICKFIRST'
+ MERGE = 'MERGE'
+ SINGLE = 'SINGLE'
+ GROUP = 'GROUP'
+ FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
+
+ check_formats = self.params.get('check_formats')
+
+ def _parse_filter(tokens):
+ filter_parts = []
+ for type, string, start, _, _ in tokens:
+ if type == tokenize.OP and string == ']':
+ return ''.join(filter_parts)
+ else:
+ filter_parts.append(string)
+
+ def _remove_unused_ops(tokens):
+ # Remove operators that we don't use and join them with the surrounding strings
+ # for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
+ ALLOWED_OPS = ('/', '+', ',', '(', ')')
+ last_string, last_start, last_end, last_line = None, None, None, None
+ for type, string, start, end, line in tokens:
+ if type == tokenize.OP and string == '[':
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ # everything inside brackets will be handled by _parse_filter
+ for type, string, start, end, line in tokens:
+ yield type, string, start, end, line
+ if type == tokenize.OP and string == ']':
+ break
+ elif type == tokenize.OP and string in ALLOWED_OPS:
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+ last_string = None
+ yield type, string, start, end, line
+ elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
+ if not last_string:
+ last_string = string
+ last_start = start
+ last_end = end
+ else:
+ last_string += string
+ if last_string:
+ yield tokenize.NAME, last_string, last_start, last_end, last_line
+
+ def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
+ selectors = []
+ current_selector = None
+ for type, string, start, _, _ in tokens:
+ # ENCODING is only defined in python 3.x
+ if type == getattr(tokenize, 'ENCODING', None):
+ continue
+ elif type in [tokenize.NAME, tokenize.NUMBER]:
+ current_selector = FormatSelector(SINGLE, string, [])
+ elif type == tokenize.OP:
+ if string == ')':
+ if not inside_group:
+ # ')' will be handled by the parentheses group
+ tokens.restore_last_token()
+ break
+ elif inside_merge and string in ['/', ',']:
+ tokens.restore_last_token()
+ break
+ elif inside_choice and string == ',':
+ tokens.restore_last_token()
+ break
+ elif string == ',':
+ if not current_selector:
+ raise syntax_error('"," must follow a format selector', start)
+ selectors.append(current_selector)
+ current_selector = None
+ elif string == '/':
+ if not current_selector:
+ raise syntax_error('"/" must follow a format selector', start)
+ first_choice = current_selector
+ second_choice = _parse_format_selection(tokens, inside_choice=True)
+ current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
+ elif string == '[':
+ if not current_selector:
+ current_selector = FormatSelector(SINGLE, 'best', [])
+ format_filter = _parse_filter(tokens)
+ current_selector.filters.append(format_filter)
+ elif string == '(':
+ if current_selector:
+ raise syntax_error('Unexpected "("', start)
+ group = _parse_format_selection(tokens, inside_group=True)
+ current_selector = FormatSelector(GROUP, group, [])
+ elif string == '+':
+ if not current_selector:
+ raise syntax_error('Unexpected "+"', start)
+ selector_1 = current_selector
+ selector_2 = _parse_format_selection(tokens, inside_merge=True)
+ if not selector_2:
+ raise syntax_error('Expected a selector', start)
+ current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
+ else:
+ raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
+ elif type == tokenize.ENDMARKER:
+ break
+ if current_selector:
+ selectors.append(current_selector)
+ return selectors
+
+ def _check_formats(formats):
+ if not check_formats:
+ yield from formats
+ return
+ for f in formats:
+ self.to_screen('[info] Testing format %s' % f['format_id'])
+ temp_file = tempfile.NamedTemporaryFile(
+ suffix='.tmp', delete=False,
+ dir=self.get_output_path('temp') or None)
+ temp_file.close()
+ try:
+ success, _ = self.dl(temp_file.name, f, test=True)
+ except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
+ success = False
+ finally:
+ if os.path.exists(temp_file.name):
+ try:
+ os.remove(temp_file.name)
+ except OSError:
+ self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
+ if success:
+ yield f
+ else:
+ self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
+
+ def _build_selector_function(selector):
+ if isinstance(selector, list): # ,
+ fs = [_build_selector_function(s) for s in selector]
+
+ def selector_function(ctx):
+ for f in fs:
+ yield from f(ctx)
+ return selector_function
+
+ elif selector.type == GROUP: # ()
+ selector_function = _build_selector_function(selector.selector)
+
+ elif selector.type == PICKFIRST: # /
+ fs = [_build_selector_function(s) for s in selector.selector]
+
+ def selector_function(ctx):
+ for f in fs:
+ picked_formats = list(f(ctx))
+ if picked_formats:
+ return picked_formats
+ return []
+
+ elif selector.type == MERGE: # +
+ selector_1, selector_2 = map(_build_selector_function, selector.selector)
+
+ def selector_function(ctx):
+ for pair in itertools.product(
+ selector_1(copy.deepcopy(ctx)), selector_2(copy.deepcopy(ctx))):
+ yield _merge(pair)
+
+ elif selector.type == SINGLE: # atom
+ format_spec = selector.selector or 'best'
+
+ # TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
+ if format_spec == 'all':
+ def selector_function(ctx):
+ yield from _check_formats(ctx['formats'])
+ elif format_spec == 'mergeall':
+ def selector_function(ctx):
+ formats = list(_check_formats(ctx['formats']))
+ if not formats:
+ return
+ merged_format = formats[-1]
+ for f in formats[-2::-1]:
+ merged_format = _merge((merged_format, f))
+ yield merged_format
+
+ else:
+ format_fallback, format_reverse, format_idx = False, True, 1
+ mobj = re.match(
+ r'(?Pbest|worst|b|w)(?Pvideo|audio|v|a)?(?P\*)?(?:\.(?P[1-9]\d*))?$',
+ format_spec)
+ if mobj is not None:
+ format_idx = int_or_none(mobj.group('n'), default=1)
+ format_reverse = mobj.group('bw')[0] == 'b'
+ format_type = (mobj.group('type') or [None])[0]
+ not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
+ format_modified = mobj.group('mod') is not None
+
+ format_fallback = not format_type and not format_modified # for b, w
+ _filter_f = (
+ (lambda f: f.get('%scodec' % format_type) != 'none')
+ if format_type and format_modified # bv*, ba*, wv*, wa*
+ else (lambda f: f.get('%scodec' % not_format_type) == 'none')
+ if format_type # bv, ba, wv, wa
+ else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
+ if not format_modified # b, w
+ else lambda f: True) # b*, w*
+ filter_f = lambda f: _filter_f(f) and (
+ f.get('vcodec') != 'none' or f.get('acodec') != 'none')
+ else:
+ if format_spec in self._format_selection_exts['audio']:
+ filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
+ elif format_spec in self._format_selection_exts['video']:
+ filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
+ elif format_spec in self._format_selection_exts['storyboards']:
+ filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
+ else:
+ filter_f = lambda f: f.get('format_id') == format_spec # id
+
+ def selector_function(ctx):
+ formats = list(ctx['formats'])
+ matches = list(filter(filter_f, formats)) if filter_f is not None else formats
+ if format_fallback and ctx['incomplete_formats'] and not matches:
+ # for extractors with incomplete formats (audio only (soundcloud)
+ # or video only (imgur)) best/worst will fallback to
+ # best/worst {video,audio}-only format
+ matches = formats
+ matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
+ try:
+ yield matches[format_idx - 1]
+ except IndexError:
+ return
+
+ filters = [self._build_format_filter(f) for f in selector.filters]
+
+ def final_selector(ctx):
+ ctx_copy = copy.deepcopy(ctx)
+ for _filter in filters:
+ ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
+ return selector_function(ctx_copy)
+ return final_selector
+
+ stream = io.BytesIO(format_spec.encode('utf-8'))
+ try:
+ tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
+ except tokenize.TokenError:
+ raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
+
+ class TokenIterator(object):
+ def __init__(self, tokens):
+ self.tokens = tokens
+ self.counter = 0
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ if self.counter >= len(self.tokens):
+ raise StopIteration()
+ value = self.tokens[self.counter]
+ self.counter += 1
+ return value
+
+ next = __next__
+
+ def restore_last_token(self):
+ self.counter -= 1
+
+ parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
+ return _build_selector_function(parsed_selector)
+
+ def _calc_headers(self, info_dict):
+ res = std_headers.copy()
+
+ add_headers = info_dict.get('http_headers')
+ if add_headers:
+ res.update(add_headers)
+
+ if 'X-Forwarded-For' not in res:
+ x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
+ if x_forwarded_for_ip:
+ res['X-Forwarded-For'] = x_forwarded_for_ip
+
+ return res
+
+    def _sanitize_thumbnails(self, info_dict):
+        """Normalize info_dict['thumbnails']: synthesize the list from the
+        single 'thumbnail' field if needed, sort worst-to-best, assign ids
+        and resolutions, sanitize URLs, and optionally drop dead links."""
+        thumbnails = info_dict.get('thumbnails')
+        if thumbnails is None:
+            thumbnail = info_dict.get('thumbnail')
+            if thumbnail:
+                info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
+        if thumbnails:
+            # Sort ascending so the "best" thumbnail ends up last.
+            thumbnails.sort(key=lambda t: (
+                t.get('preference') if t.get('preference') is not None else -1,
+                t.get('width') if t.get('width') is not None else -1,
+                t.get('height') if t.get('height') is not None else -1,
+                t.get('id') if t.get('id') is not None else '',
+                t.get('url')))
+
+            def thumbnail_tester():
+                # Factory returning a predicate that probes thumbnail URLs
+                # with HEAD requests; with check_formats enabled every URL is
+                # probed, otherwise only those marked '_test_url'.
+                if self.params.get('check_formats'):
+                    test_all = True
+                    to_screen = lambda msg: self.to_screen(f'[info] {msg}')
+                else:
+                    test_all = False
+                    to_screen = self.write_debug
+
+                def test_thumbnail(t):
+                    if not test_all and not t.get('_test_url'):
+                        return True
+                    to_screen('Testing thumbnail %s' % t['id'])
+                    try:
+                        self.urlopen(HEADRequest(t['url']))
+                    except network_exceptions as err:
+                        to_screen('Unable to connect to thumbnail %s URL "%s" - %s. Skipping...' % (
+                            t['id'], t['url'], error_to_compat_str(err)))
+                        return False
+                    return True
+
+                return test_thumbnail
+
+            for i, t in enumerate(thumbnails):
+                if t.get('id') is None:
+                    t['id'] = '%d' % i
+                if t.get('width') and t.get('height'):
+                    t['resolution'] = '%dx%d' % (t['width'], t['height'])
+                t['url'] = sanitize_url(t['url'])
+
+            if self.params.get('check_formats') is not False:
+                # Probe lazily from best to worst (the list is reversed), then
+                # reverse back so ordering is preserved for consumers.
+                info_dict['thumbnails'] = LazyList(filter(thumbnail_tester(), thumbnails[::-1])).reverse()
+            else:
+                info_dict['thumbnails'] = thumbnails
+
+    def process_video_result(self, info_dict, download=True):
+        """Sanitize a single extracted video result, run format selection
+        and, when *download* is true, hand every chosen format over to
+        process_info(). Returns the (mutated) info_dict."""
+        assert info_dict.get('_type', 'video') == 'video'
+
+        if 'id' not in info_dict:
+            raise ExtractorError('Missing "id" field in extractor result')
+        if 'title' not in info_dict:
+            raise ExtractorError('Missing "title" field in extractor result',
+                                 video_id=info_dict['id'], ie=info_dict['extractor'])
+
+        def report_force_conversion(field, field_not, conversion):
+            # Warn about an extractor bug before coercing the field type.
+            self.report_warning(
+                '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
+                % (field, field_not, conversion))
+
+        def sanitize_string_field(info, string_field):
+            field = info.get(string_field)
+            if field is None or isinstance(field, compat_str):
+                return
+            report_force_conversion(string_field, 'a string', 'string')
+            info[string_field] = compat_str(field)
+
+        def sanitize_numeric_fields(info):
+            for numeric_field in self._NUMERIC_FIELDS:
+                field = info.get(numeric_field)
+                if field is None or isinstance(field, compat_numeric_types):
+                    continue
+                report_force_conversion(numeric_field, 'numeric', 'int')
+                info[numeric_field] = int_or_none(field)
+
+        sanitize_string_field(info_dict, 'id')
+        sanitize_numeric_fields(info_dict)
+
+        if 'playlist' not in info_dict:
+            # It isn't part of a playlist
+            info_dict['playlist'] = None
+            info_dict['playlist_index'] = None
+
+        self._sanitize_thumbnails(info_dict)
+
+        # Keep 'thumbnail' and 'thumbnails' mutually consistent.
+        thumbnail = info_dict.get('thumbnail')
+        thumbnails = info_dict.get('thumbnails')
+        if thumbnail:
+            info_dict['thumbnail'] = sanitize_url(thumbnail)
+        elif thumbnails:
+            info_dict['thumbnail'] = thumbnails[-1]['url']
+
+        if info_dict.get('display_id') is None and 'id' in info_dict:
+            info_dict['display_id'] = info_dict['id']
+
+        # Derive the YYYYMMDD date fields from their timestamp counterparts.
+        for ts_key, date_key in (
+                ('timestamp', 'upload_date'),
+                ('release_timestamp', 'release_date'),
+        ):
+            if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
+                # Working around out-of-range timestamp values (e.g. negative ones on Windows,
+                # see http://bugs.python.org/issue1646728)
+                try:
+                    upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
+                    info_dict[date_key] = upload_date.strftime('%Y%m%d')
+                except (ValueError, OverflowError, OSError):
+                    pass
+
+        # Reconcile 'live_status' with the legacy is_live/was_live booleans.
+        live_keys = ('is_live', 'was_live')
+        live_status = info_dict.get('live_status')
+        if live_status is None:
+            for key in live_keys:
+                if info_dict.get(key) is False:
+                    continue
+                if info_dict.get(key):
+                    live_status = key
+                    break
+            if all(info_dict.get(key) is False for key in live_keys):
+                live_status = 'not_live'
+        if live_status:
+            info_dict['live_status'] = live_status
+            for key in live_keys:
+                if info_dict.get(key) is None:
+                    info_dict[key] = (live_status == key)
+
+        # Auto generate title fields corresponding to the *_number fields when missing
+        # in order to always have clean titles. This is very common for TV series.
+        for field in ('chapter', 'season', 'episode'):
+            if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
+                info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
+
+        # Sanitize subtitle/caption URLs and fill in missing extensions.
+        for cc_kind in ('subtitles', 'automatic_captions'):
+            cc = info_dict.get(cc_kind)
+            if cc:
+                for _, subtitle in cc.items():
+                    for subtitle_format in subtitle:
+                        if subtitle_format.get('url'):
+                            subtitle_format['url'] = sanitize_url(subtitle_format['url'])
+                        if subtitle_format.get('ext') is None:
+                            subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
+
+        automatic_captions = info_dict.get('automatic_captions')
+        subtitles = info_dict.get('subtitles')
+
+        # We now pick which formats have to be downloaded
+        if info_dict.get('formats') is None:
+            # There's only one format available
+            formats = [info_dict]
+        else:
+            formats = info_dict['formats']
+
+        info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
+        if not self.params.get('allow_unplayable_formats'):
+            formats = [f for f in formats if not f.get('has_drm')]
+
+        if not formats:
+            self.raise_no_formats(info_dict)
+
+        def is_wellformed(f):
+            url = f.get('url')
+            if not url:
+                self.report_warning(
+                    '"url" field is missing or empty - skipping format, '
+                    'there is an error in extractor')
+                return False
+            if isinstance(url, bytes):
+                sanitize_string_field(f, 'url')
+            return True
+
+        # Filter out malformed formats for better extraction robustness
+        formats = list(filter(is_wellformed, formats))
+
+        formats_dict = {}
+
+        # We check that all the formats have the format and format_id fields
+        for i, format in enumerate(formats):
+            sanitize_string_field(format, 'format_id')
+            sanitize_numeric_fields(format)
+            format['url'] = sanitize_url(format['url'])
+            if not format.get('format_id'):
+                format['format_id'] = compat_str(i)
+            else:
+                # Sanitize format_id from characters used in format selector expression
+                format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
+            format_id = format['format_id']
+            if format_id not in formats_dict:
+                formats_dict[format_id] = []
+            formats_dict[format_id].append(format)
+
+        # Make sure all formats have unique format_id
+        common_exts = set(itertools.chain(*self._format_selection_exts.values()))
+        for format_id, ambiguous_formats in formats_dict.items():
+            ambigious_id = len(ambiguous_formats) > 1
+            for i, format in enumerate(ambiguous_formats):
+                if ambigious_id:
+                    format['format_id'] = '%s-%d' % (format_id, i)
+                if format.get('ext') is None:
+                    format['ext'] = determine_ext(format['url']).lower()
+                # Ensure there is no conflict between id and ext in format selection
+                # See https://github.com/yt-dlp/yt-dlp/issues/1282
+                if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
+                    format['format_id'] = 'f%s' % format['format_id']
+
+        for i, format in enumerate(formats):
+            if format.get('format') is None:
+                format['format'] = '{id} - {res}{note}'.format(
+                    id=format['format_id'],
+                    res=self.format_resolution(format),
+                    note=format_field(format, 'format_note', ' (%s)'),
+                )
+            # Add HTTP headers, so that external programs can use them from the
+            # json output
+            full_format_info = info_dict.copy()
+            full_format_info.update(format)
+            format['http_headers'] = self._calc_headers(full_format_info)
+        # Remove private housekeeping stuff
+        if '__x_forwarded_for_ip' in info_dict:
+            del info_dict['__x_forwarded_for_ip']
+
+        # TODO Central sorting goes here
+
+        if not formats or formats[0] is not info_dict:
+            # only set the 'formats' fields if the original info_dict list them
+            # otherwise we end up with a circular reference, the first (and unique)
+            # element in the 'formats' field in info_dict is info_dict itself,
+            # which can't be exported to json
+            info_dict['formats'] = formats
+
+        info_dict, _ = self.pre_process(info_dict)
+
+        # Listing modes (-F, --list-thumbnails, --list-subs) may short-circuit.
+        if self.params.get('list_thumbnails'):
+            self.list_thumbnails(info_dict)
+        if self.params.get('listformats'):
+            if not info_dict.get('formats') and not info_dict.get('url'):
+                self.to_screen('%s has no formats' % info_dict['id'])
+            else:
+                self.list_formats(info_dict)
+        if self.params.get('listsubtitles'):
+            if 'automatic_captions' in info_dict:
+                self.list_subtitles(
+                    info_dict['id'], automatic_captions, 'automatic captions')
+            self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
+        list_only = self.params.get('simulate') is None and (
+            self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
+        if list_only:
+            # Without this printing, -F --print-json will not work
+            self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
+            return
+
+        format_selector = self.format_selector
+        if format_selector is None:
+            req_format = self._default_format_spec(info_dict, download=download)
+            self.write_debug('Default format spec: %s' % req_format)
+            format_selector = self.build_format_selector(req_format)
+
+        # While in format selection we may need to have an access to the original
+        # format set in order to calculate some metrics or do some processing.
+        # For now we need to be able to guess whether original formats provided
+        # by extractor are incomplete or not (i.e. whether extractor provides only
+        # video-only or audio-only formats) for proper formats selection for
+        # extractors with such incomplete formats (see
+        # https://github.com/ytdl-org/youtube-dl/pull/5556).
+        # Since formats may be filtered during format selection and may not match
+        # the original formats the results may be incorrect. Thus original formats
+        # or pre-calculated metrics should be passed to format selection routines
+        # as well.
+        # We will pass a context object containing all necessary additional data
+        # instead of just formats.
+        # This fixes incorrect format selection issue (see
+        # https://github.com/ytdl-org/youtube-dl/issues/10083).
+        incomplete_formats = (
+            # All formats are video-only or
+            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
+            # all formats are audio-only
+            or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
+
+        ctx = {
+            'formats': formats,
+            'incomplete_formats': incomplete_formats,
+        }
+
+        formats_to_download = list(format_selector(ctx))
+        if not formats_to_download:
+            if not self.params.get('ignore_no_formats_error'):
+                raise ExtractorError('Requested format is not available', expected=True,
+                                     video_id=info_dict['id'], ie=info_dict['extractor'])
+            else:
+                self.report_warning('Requested format is not available')
+                # Process what we can, even without any available formats.
+                self.process_info(dict(info_dict))
+        elif download:
+            self.to_screen(
+                '[info] %s: Downloading %d format(s): %s' % (
+                    info_dict['id'], len(formats_to_download),
+                    ", ".join([f['format_id'] for f in formats_to_download])))
+            for fmt in formats_to_download:
+                new_info = dict(info_dict)
+                # Save a reference to the original info_dict so that it can be modified in process_info if needed
+                new_info['__original_infodict'] = info_dict
+                new_info.update(fmt)
+                self.process_info(new_info)
+        # We update the info dict with the best quality format (backwards compatibility)
+        if formats_to_download:
+            info_dict.update(formats_to_download[-1])
+        return info_dict
+
+ def __forced_printings(self, info_dict, filename, incomplete):
+ def print_mandatory(field, actual_field=None):
+ if actual_field is None:
+ actual_field = field
+ if (self.params.get('force%s' % field, False)
+ and (not incomplete or info_dict.get(actual_field) is not None)):
+ self.to_stdout(info_dict[actual_field])
+
+ info_dict = info_dict.copy()
+ if filename is not None:
+ info_dict['filename'] = filename
+ if info_dict.get('requested_formats') is not None:
+ # For RTMP URLs, also include the playpath
+ info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in info_dict['requested_formats'])
+ elif 'url' in info_dict:
+ info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')
+
+ print_mandatory('title')
+ print_mandatory('id')
+ print_mandatory('url', 'urls')
+ print_mandatory('format')
+
+ if self.params.get('forcejson'):
+ self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
+
+    def process_info(self, info_dict):
+        """Process a single resolved IE result."""
+
+        assert info_dict.get('_type', 'video') == 'video'
+
+        max_downloads = self.params.get('max_downloads')
+        if max_downloads is not None:
+            if self._num_downloads >= int(max_downloads):
+                raise MaxDownloadsReached()
+
+        # TODO: backward compatibility, to be removed
+        info_dict['fulltitle'] = info_dict['title']
+
+        if 'format' not in info_dict and 'ext' in info_dict:
+            info_dict['format'] = info_dict['ext']
+
+        # A non-None return value means the entry was rejected by a filter.
+        if self._match_entry(info_dict) is not None:
+            return
+
+        self.post_extract(info_dict)
+        self._num_downloads += 1
+
+        # info_dict['_filename'] needs to be set for backward compatibility
+        #info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
+        #temp_filename = self.prepare_filename(info_dict, 'temp')
+        files_to_move = {}
+
+        # Forced printings
+        # NOTE(review): the literal string 'full_filename' is passed as the
+        # filename argument (prepare_filename is commented out above in this
+        # vendored copy), so a forced 'filename' printing would emit that
+        # literal — confirm this is intentional.
+        self.__forced_printings(info_dict, 'full_filename', incomplete=('format' not in info_dict))
+
+        if self.params.get('simulate'):
+            if self.params.get('force_write_download_archive', False):
+                self.record_download_archive(info_dict)
+            # Do nothing else if in simulate mode
+            return
+
+        try:
+            info_dict, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
+        except PostProcessingError as err:
+            self.report_error('Preprocessing: %s' % str(err))
+            return
+
+        # Re-check after the preprocessors; the count may have advanced.
+        max_downloads = self.params.get('max_downloads')
+        if max_downloads is not None and self._num_downloads >= int(max_downloads):
+            raise MaxDownloadsReached()
+
+    def download(self, url_list):
+        """Download a given list of URLs."""
+        for url in url_list:
+            try:
+                # It also downloads the videos
+                res = self.extract_info(
+                    url, force_generic_extractor=self.params.get('force_generic_extractor', False))
+            except UnavailableVideoError:
+                # Non-fatal: report and continue with the next URL.
+                self.report_error('unable to download video')
+            except MaxDownloadsReached:
+                self.to_screen('[info] Maximum number of downloads reached')
+                raise
+            except ExistingVideoReached:
+                self.to_screen('[info] Encountered a video that is already in the archive, stopping due to --break-on-existing')
+                raise
+            except RejectedVideoReached:
+                self.to_screen('[info] Encountered a video that did not match filter, stopping due to --break-on-reject')
+                raise
+            else:
+                if self.params.get('dump_single_json', False):
+                    self.post_extract(res)
+                    self.to_stdout(json.dumps(self.sanitize_info(res)))
+
+        return self._download_retcode
+
+ @staticmethod
+ def sanitize_info(info_dict, remove_private_keys=False):
+ ''' Sanitize the infodict for converting to json '''
+ if info_dict is None:
+ return info_dict
+ info_dict.setdefault('epoch', int(time.time()))
+ remove_keys = {'__original_infodict'} # Always remove this since this may contain a copy of the entire dict
+ keep_keys = ['_type'], # Always keep this to facilitate load-info-json
+ if remove_private_keys:
+ remove_keys |= {
+ 'requested_formats', 'requested_subtitles', 'requested_entries',
+ 'filepath', 'entries', 'original_url', 'playlist_autonumber',
+ }
+ empty_values = (None, {}, [], set(), tuple())
+ reject = lambda k, v: k not in keep_keys and (
+ k.startswith('_') or k in remove_keys or v in empty_values)
+ else:
+ reject = lambda k, v: k in remove_keys
+ filter_fn = lambda obj: (
+ list(map(filter_fn, obj)) if isinstance(obj, (LazyList, list, tuple, set))
+ else obj if not isinstance(obj, dict)
+ else dict((k, filter_fn(v)) for k, v in obj.items() if not reject(k, v)))
+ return filter_fn(info_dict)
+
+    @staticmethod
+    def filter_requested_info(info_dict, actually_filter=True):
+        ''' Alias of sanitize_info for backward compatibility '''
+        # actually_filter maps onto sanitize_info's remove_private_keys.
+        return YoutubeDL.sanitize_info(info_dict, actually_filter)
+
+    @staticmethod
+    def post_extract(info_dict):
+        """Resolve the deferred '__post_extractor' callback, merging its
+        result into info_dict (and into the linked '__original_infodict'),
+        recursing into playlist entries."""
+        def actual_post_extract(info_dict):
+            if info_dict.get('_type') in ('playlist', 'multi_video'):
+                for video_dict in info_dict.get('entries', {}):
+                    actual_post_extract(video_dict or {})
+                return
+
+            post_extractor = info_dict.get('__post_extractor') or (lambda: {})
+            extra = post_extractor().items()
+            info_dict.update(extra)
+            info_dict.pop('__post_extractor', None)
+
+            # Keep the original dict (saved by process_video_result) in sync.
+            original_infodict = info_dict.get('__original_infodict') or {}
+            original_infodict.update(extra)
+            original_infodict.pop('__post_extractor', None)
+
+        actual_post_extract(info_dict or {})
+
+ def pre_process(self, ie_info, key='pre_process', files_to_move=None):
+ info = dict(ie_info)
+ info['__files_to_move'] = files_to_move or {}
+ for pp in self._pps[key]:
+ info = self.run_pp(pp, info)
+ return info, info.pop('__files_to_move', None)
+
+ def _make_archive_id(self, info_dict):
+ video_id = info_dict.get('id')
+ if not video_id:
+ return
+ # Future-proof against any change in case
+ # and backwards compatibility with prior versions
+ extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
+ if extractor is None:
+ url = str_or_none(info_dict.get('url'))
+ if not url:
+ return
+ # Try to find matching extractor for the URL and take its ie_key
+ for ie_key, ie in self._ies.items():
+ if ie.suitable(url):
+ extractor = ie_key
+ break
+ else:
+ return
+ return '%s %s' % (extractor.lower(), video_id)
+
+ @staticmethod
+ def format_resolution(format, default='unknown'):
+ is_images = format.get('vcodec') == 'none' and format.get('acodec') == 'none'
+ if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
+ return 'audio only'
+ if format.get('resolution') is not None:
+ return format['resolution']
+ if format.get('width') and format.get('height'):
+ res = '%dx%d' % (format['width'], format['height'])
+ elif format.get('height'):
+ res = '%sp' % format['height']
+ elif format.get('width'):
+ res = '%dx?' % format['width']
+ elif is_images:
+ return 'images'
+ else:
+ return default
+ return f'{res} images' if is_images else res
+
+    def urlopen(self, req):
+        """ Start an HTTP download """
+        # Bare URL strings are wrapped into a sanitized Request object.
+        if isinstance(req, compat_basestring):
+            req = sanitized_Request(req)
+        return self._opener.open(req, timeout=self._socket_timeout)
+
+    def _setup_opener(self):
+        """Build the urllib opener (proxies, cookies, custom handlers) used
+        by urlopen() and the downloaders; stores it on self._opener."""
+        timeout_val = self.params.get('socket_timeout')
+        self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
+
+        opts_proxy = self.params.get('proxy')
+
+        # NOTE(review): the cookie processor is built with no cookie jar in
+        # this vendored copy — confirm cookie support was deliberately
+        # stripped.
+        cookie_processor = YoutubeDLCookieProcessor(None)
+        if opts_proxy is not None:
+            if opts_proxy == '':
+                proxies = {}
+            else:
+                proxies = {'http': opts_proxy, 'https': opts_proxy}
+        else:
+            proxies = compat_urllib_request.getproxies()
+            # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
+            if 'http' in proxies and 'https' not in proxies:
+                proxies['https'] = proxies['http']
+        proxy_handler = PerRequestProxyHandler(proxies)
+
+        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
+        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
+        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
+        redirect_handler = YoutubeDLRedirectHandler()
+        data_handler = compat_urllib_request_DataHandler()
+
+        # When passing our own FileHandler instance, build_opener won't add the
+        # default FileHandler and allows us to disable the file protocol, which
+        # can be used for malicious purposes (see
+        # https://github.com/ytdl-org/youtube-dl/issues/8227)
+        file_handler = compat_urllib_request.FileHandler()
+
+        def file_open(*args, **kwargs):
+            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
+        file_handler.file_open = file_open
+
+        opener = compat_urllib_request.build_opener(
+            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
+
+        # Delete the default user-agent header, which would otherwise apply in
+        # cases where our custom HTTP handler doesn't come into play
+        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
+        opener.addheaders = []
+        self._opener = opener
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/__init__.py b/plugin.video.cc.com/resources/lib/yt_dlp/__init__.py
new file mode 100644
index 0000000000..dad0ea7677
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/__init__.py
@@ -0,0 +1,118 @@
+#!/usr/bin/env python3
+# coding: utf-8
+
+f'You are using an unsupported version of Python. Only Python versions 3.6 and above are supported by yt-dlp' # noqa: F541
+
+__license__ = 'Public Domain'
+
+import codecs
+import os
+import sys
+
+from .options import (
+ parseOpts,
+)
+from .compat import (
+ workaround_optparse_bug9161,
+)
+from .utils import (
+ DownloadError,
+ error_to_compat_str,
+ ExistingVideoReached,
+ MaxDownloadsReached,
+ preferredencoding,
+ RejectedVideoReached,
+ SameFileError,
+)
+from .extractor import gen_extractors, list_extractors
+
+from .YoutubeDL import YoutubeDL
+
+
+def _real_main(argv=None):
+    """Parse CLI options, build a YoutubeDL instance and download the URLs.
+    Exits the process with the downloader's return code."""
+    # Compatibility fixes for Windows
+    if sys.platform == 'win32':
+        # https://github.com/ytdl-org/youtube-dl/issues/820
+        codecs.register(lambda name: codecs.lookup('utf-8') if name == 'cp65001' else None)
+
+    workaround_optparse_bug9161()
+
+    parser, opts, args = parseOpts(argv)
+    warnings = []
+
+    # Batch file verification
+    # NOTE(review): batch-file reading is stripped in this vendored copy,
+    # so batch_urls is always empty here.
+    batch_urls = []
+    all_urls = batch_urls + [url.strip() for url in args]  # batch_urls are already stripped in read_batch_urls
+    _enc = preferredencoding()
+    all_urls = [url.decode(_enc, 'ignore') if isinstance(url, bytes) else url for url in all_urls]
+
+    # NOTE(review): parse_retries and validate_outtmpl are defined but not
+    # referenced in this trimmed copy — presumably kept for parity with
+    # upstream; verify before removing.
+    def parse_retries(retries, name=''):
+        # 'inf'/'infinite' mean retry forever; otherwise must be an int.
+        if retries in ('inf', 'infinite'):
+            parsed_retries = float('inf')
+        else:
+            try:
+                parsed_retries = int(retries)
+            except (TypeError, ValueError):
+                parser.error('invalid %sretry count specified' % name)
+        return parsed_retries
+
+    def validate_outtmpl(tmpl, msg):
+        err = YoutubeDL.validate_outtmpl(tmpl)
+        if err:
+            parser.error('invalid %s %r: %s' % (msg, tmpl, error_to_compat_str(err)))
+
+    any_getting = opts.dumpjson
+
+    def report_conflict(arg1, arg2):
+        warnings.append('%s is ignored since %s was given' % (arg2, arg1))
+
+    def report_args_compat(arg, name):
+        warnings.append('%s given without specifying name. The arguments will be given to all %s' % (arg, name))
+
+    final_ext = None
+
+    ydl_opts = {
+        'quiet': any_getting,
+        'forcejson': opts.dumpjson,
+        'logtostderr': True,
+        'verbose': opts.verbose,
+        'final_ext': final_ext,
+        'warnings': warnings,
+    }
+
+    with YoutubeDL(ydl_opts) as ydl:
+        actual_use = len(all_urls)
+
+        # Maybe do nothing
+        if not actual_use:
+            ydl.warn_if_short_id(sys.argv[1:] if argv is None else argv)
+            parser.error(
+                'You must provide at least one URL.\n'
+                'Type yt-dlp --help to see a list of all options.')
+
+        try:
+            retcode = ydl.download(all_urls)
+        except (MaxDownloadsReached, ExistingVideoReached, RejectedVideoReached):
+            ydl.to_screen('Aborting remaining downloads')
+            retcode = 101
+
+    sys.exit(retcode)
+
+
+def main(argv=None):
+    """CLI entry point: run _real_main and map known failures to exit codes."""
+    try:
+        _real_main(argv)
+    except DownloadError:
+        sys.exit(1)
+    except SameFileError as e:
+        sys.exit(f'ERROR: {e}')
+    except KeyboardInterrupt:
+        sys.exit('\nERROR: Interrupted by user')
+    except BrokenPipeError as e:
+        # https://docs.python.org/3/library/signal.html#note-on-sigpipe
+        # Redirect stdout to devnull so the interpreter's shutdown flush
+        # does not raise a second BrokenPipeError.
+        devnull = os.open(os.devnull, os.O_WRONLY)
+        os.dup2(devnull, sys.stdout.fileno())
+        sys.exit(f'\nERROR: {e}')
+
+
+__all__ = ['main', 'YoutubeDL', 'gen_extractors', 'list_extractors']
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/__main__.py b/plugin.video.cc.com/resources/lib/yt_dlp/__main__.py
new file mode 100644
index 0000000000..c9f41473db
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/__main__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+from __future__ import unicode_literals
+
+# Execute with
+# $ python yt_dlp/__main__.py (2.6+)
+# $ python -m yt_dlp (2.7+)
+
+import sys
+
+if __package__ is None and not hasattr(sys, 'frozen'):
+    # direct call of __main__.py
+    import os.path
+    path = os.path.realpath(os.path.abspath(__file__))
+    # Make the package's parent directory importable so `import yt_dlp` works.
+    sys.path.insert(0, os.path.dirname(os.path.dirname(path)))
+
+import yt_dlp
+
+if __name__ == '__main__':
+    yt_dlp.main()
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/compat.py b/plugin.video.cc.com/resources/lib/yt_dlp/compat.py
new file mode 100644
index 0000000000..8508f1465c
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/compat.py
@@ -0,0 +1,295 @@
+# coding: utf-8
+
+import asyncio
+import base64
+import ctypes
+import getpass
+import html
+import html.parser
+import http
+import http.client
+import http.cookiejar
+import http.cookies
+import http.server
+import itertools
+import optparse
+import os
+import re
+import shlex
+import shutil
+import socket
+import struct
+import subprocess
+import sys
+import tokenize
+import urllib
+import xml.etree.ElementTree as etree
+from subprocess import DEVNULL
+
+
+# HTMLParseError has been deprecated in Python 3.3 and removed in
+# Python 3.5. Introducing dummy exception for Python >3.5 for compatible
+# and uniform cross-version exception handling
class compat_HTMLParseError(Exception):
    """Stand-in for html.parser.HTMLParseError (removed in Python 3.5),
    kept so except-clauses work uniformly across versions."""
+
+
+# compat_ctypes_WINFUNCTYPE = ctypes.WINFUNCTYPE
+# will not work since ctypes.WINFUNCTYPE does not exist in UNIX machines
def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
    # Resolved lazily at call time: ctypes.WINFUNCTYPE only exists on
    # Windows, so referencing it at import time would crash on UNIX.
    return ctypes.WINFUNCTYPE(*args, **kwargs)
+
+
class _TreeBuilder(etree.TreeBuilder):
    """TreeBuilder that silently ignores DOCTYPE declarations."""

    def doctype(self, name, pubid, system):
        pass


def compat_etree_fromstring(text):
    """Parse an XML document from a string, tolerating a DOCTYPE."""
    parser = etree.XMLParser(target=_TreeBuilder())
    return etree.XML(text, parser=parser)
+
+
+compat_os_name = os._name if os.name == 'java' else os.name
+
+
if compat_os_name == 'nt':
    # Windows: shlex.quote emits POSIX-style quoting that cmd.exe does not
    # understand, so emulate quoting by wrapping in double quotes and
    # backslash-escaping embedded double quotes.
    def compat_shlex_quote(s):
        return s if re.match(r'^[-_\w./]+$', s) else '"%s"' % s.replace('"', '\\"')
else:
    from shlex import quote as compat_shlex_quote
+
+
def compat_ord(c):
    """Return the integer value of *c*.

    Accepts either an int (returned unchanged, e.g. an element of a bytes
    object) or a length-1 str/bytes, which is passed through ord().
    Deliberately uses an exact type check rather than isinstance() to keep
    the historical behaviour for int subclasses such as bool.
    """
    return c if type(c) is int else ord(c)
+
+
def compat_setenv(key, value, env=os.environ):
    """Set an environment variable; mutates os.environ by default.

    The mutable default is intentional: os.environ is the one shared
    mapping this shim is meant to update.
    """
    env[key] = value
+
+
if compat_os_name == 'nt' and sys.version_info < (3, 8):
    # os.path.realpath on Windows does not follow symbolic links
    # prior to Python 3.8 (see https://bugs.python.org/issue9949)
    def compat_realpath(path):
        # Resolve links manually, one hop at a time, until a non-link remains.
        while os.path.islink(path):
            path = os.path.abspath(os.readlink(path))
        return path
else:
    compat_realpath = os.path.realpath
+
+
def compat_print(s):
    # Historical py2/py3 shim; on Python 3 this is just print() with a
    # str-only sanity check.
    assert isinstance(s, compat_str)
    print(s)
+
+
+# Fix https://github.com/ytdl-org/youtube-dl/issues/4223
+# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
    """Monkey-patch optparse when it cannot handle unicode option strings.

    Probes OptionGroup.add_option with a (unicode) string; if that raises
    TypeError (bug 9161), replaces add_option with a wrapper that
    ASCII-encodes every str argument before delegating to the original.
    """
    op = optparse.OptionParser()
    og = optparse.OptionGroup(op, 'foo')
    try:
        og.add_option('-t')
    except TypeError:
        real_add_option = optparse.OptionGroup.add_option

        def _compat_add_option(self, *args, **kwargs):
            # Encode only str values; pass everything else through untouched.
            enc = lambda v: (
                v.encode('ascii', 'replace') if isinstance(v, compat_str)
                else v)
            bargs = [enc(a) for a in args]
            bkwargs = dict(
                (k, enc(v)) for k, v in kwargs.items())
            return real_add_option(self, *bargs, **bkwargs)
        optparse.OptionGroup.add_option = _compat_add_option
+
+
# re.Pattern / re.Match only gained public names in Python 3.7/3.8;
# fall back to the concrete types of a compiled pattern / match object.
try:
    compat_Pattern = re.Pattern
except AttributeError:
    compat_Pattern = type(re.compile(''))


try:
    compat_Match = re.Match
except AttributeError:
    compat_Match = type(re.compile('').match(''))
+
+
try:
    compat_asyncio_run = asyncio.run  # >= 3.7
except AttributeError:
    # Python 3.6: emulate asyncio.run() on top of the current event loop,
    # creating and installing a new one if none exists in this thread.
    def compat_asyncio_run(coro):
        try:
            loop = asyncio.get_event_loop()
        except RuntimeError:
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
        loop.run_until_complete(coro)

    # Also patch asyncio itself so third-party calls to asyncio.run() work.
    asyncio.run = compat_asyncio_run
+
+
+# Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl
+# See https://github.com/yt-dlp/yt-dlp/issues/792
+# https://docs.python.org/3/library/os.path.html#os.path.expanduser
if compat_os_name in ('nt', 'ce') and 'HOME' in os.environ:
    # Capture %HOME% once so expansion stays consistent for this process.
    _userhome = os.environ['HOME']

    def compat_expanduser(path):
        # Mirrors os.path.expanduser but honours %HOME% (see note above).
        if not path.startswith('~'):
            return path
        i = path.replace('\\', '/', 1).find('/')  # ~user
        if i < 0:
            i = len(path)
        # '~other' expands to a sibling directory of the current user's home.
        userhome = os.path.join(os.path.dirname(_userhome), path[1:i]) if i > 1 else _userhome
        return userhome + path[i:]
else:
    compat_expanduser = os.path.expanduser
+
+
# Optional AES support: prefer pycryptodomex (Cryptodome namespace), fall
# back to pycryptodome/pycrypto (Crypto); None when neither is installed.
try:
    from Cryptodome.Cipher import AES as compat_pycrypto_AES
except ImportError:
    try:
        from Crypto.Cipher import AES as compat_pycrypto_AES
    except ImportError:
        compat_pycrypto_AES = None
+
+
def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075
    """Enable VT (ANSI escape) processing on the Windows console.

    Hack: spawning any command through a shell flips the console into VT
    mode as a side effect. No-op on non-Windows platforms.
    """
    if compat_os_name != 'nt':
        return
    startupinfo = subprocess.STARTUPINFO()
    # Keep the helper process's window hidden.
    startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    subprocess.Popen('', shell=True, startupinfo=startupinfo)
+
+
# Deprecated

# Py2-era aliases kept only so legacy youtube-dl-style code keeps working;
# on Python 3 they map directly onto builtins and stdlib attributes.
compat_basestring = str
compat_chr = chr
compat_input = input
compat_integer_types = (int, )
compat_kwargs = lambda kwargs: kwargs
compat_numeric_types = (int, float, complex)
compat_str = str
compat_xpath = lambda xpath: xpath
compat_zip = zip

# Direct stdlib re-exports under their historical compat_* names.
compat_HTMLParser = html.parser.HTMLParser
compat_HTTPError = urllib.error.HTTPError
compat_Struct = struct.Struct
compat_b64decode = base64.b64decode
compat_cookiejar = http.cookiejar
compat_cookiejar_Cookie = compat_cookiejar.Cookie
compat_cookies = http.cookies
compat_cookies_SimpleCookie = compat_cookies.SimpleCookie
compat_etree_Element = etree.Element
compat_etree_register_namespace = etree.register_namespace
compat_get_terminal_size = shutil.get_terminal_size
compat_getenv = os.getenv
compat_getpass = getpass.getpass
compat_html_entities = html.entities
compat_html_entities_html5 = compat_html_entities.html5
compat_http_client = http.client
compat_http_server = http.server
compat_itertools_count = itertools.count
compat_parse_qs = urllib.parse.parse_qs
compat_shlex_split = shlex.split
compat_socket_create_connection = socket.create_connection
compat_struct_pack = struct.pack
compat_struct_unpack = struct.unpack
compat_subprocess_get_DEVNULL = lambda: DEVNULL
compat_tokenize_tokenize = tokenize.tokenize
compat_urllib_error = urllib.error
compat_urllib_parse = urllib.parse
compat_urllib_parse_quote = urllib.parse.quote
compat_urllib_parse_quote_plus = urllib.parse.quote_plus
compat_urllib_parse_unquote = urllib.parse.unquote
compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
compat_urllib_parse_urlencode = urllib.parse.urlencode
compat_urllib_parse_urlparse = urllib.parse.urlparse
compat_urllib_parse_urlunparse = urllib.parse.urlunparse
compat_urllib_request = urllib.request
compat_urllib_request_DataHandler = urllib.request.DataHandler
compat_urllib_response = urllib.response
compat_urlparse = urllib.parse
compat_urlretrieve = urllib.request.urlretrieve
compat_xml_parse_error = etree.ParseError
+
+
# Set public objects

# Names exported via `from .compat import *` (kept alphabetically sorted).
__all__ = [
    'compat_HTMLParseError',
    'compat_HTMLParser',
    'compat_HTTPError',
    'compat_Match',
    'compat_Pattern',
    'compat_Struct',
    'compat_asyncio_run',
    'compat_b64decode',
    'compat_basestring',
    'compat_chr',
    'compat_cookiejar',
    'compat_cookiejar_Cookie',
    'compat_cookies',
    'compat_cookies_SimpleCookie',
    'compat_ctypes_WINFUNCTYPE',
    'compat_etree_Element',
    'compat_etree_fromstring',
    'compat_etree_register_namespace',
    'compat_expanduser',
    'compat_get_terminal_size',
    'compat_getenv',
    'compat_getpass',
    'compat_html_entities',
    'compat_html_entities_html5',
    'compat_http_client',
    'compat_http_server',
    'compat_input',
    'compat_integer_types',
    'compat_itertools_count',
    'compat_kwargs',
    'compat_numeric_types',
    'compat_ord',
    'compat_os_name',
    'compat_parse_qs',
    'compat_print',
    'compat_pycrypto_AES',
    'compat_realpath',
    'compat_setenv',
    'compat_shlex_quote',
    'compat_shlex_split',
    'compat_socket_create_connection',
    'compat_str',
    'compat_struct_pack',
    'compat_struct_unpack',
    'compat_subprocess_get_DEVNULL',
    'compat_tokenize_tokenize',
    'compat_urllib_error',
    'compat_urllib_parse',
    'compat_urllib_parse_quote',
    'compat_urllib_parse_quote_plus',
    'compat_urllib_parse_unquote',
    'compat_urllib_parse_unquote_plus',
    'compat_urllib_parse_unquote_to_bytes',
    'compat_urllib_parse_urlencode',
    'compat_urllib_parse_urlparse',
    'compat_urllib_parse_urlunparse',
    'compat_urllib_request',
    'compat_urllib_request_DataHandler',
    'compat_urllib_response',
    'compat_urlparse',
    'compat_urlretrieve',
    'compat_xml_parse_error',
    'compat_xpath',
    'compat_zip',
    'windows_enable_vt_mode',
    'workaround_optparse_bug9161',
]
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/extractor/__init__.py b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/__init__.py
new file mode 100644
index 0000000000..35bc838fed
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/__init__.py
@@ -0,0 +1,52 @@
+from __future__ import unicode_literals
+
+from ..utils import load_plugins
+
# Prefer the pre-generated lazy extractor stubs (faster startup); fall back
# to importing every extractor module eagerly when they are absent.
try:
    from .lazy_extractors import *
    from .lazy_extractors import _ALL_CLASSES
    _LAZY_LOADER = True
    _PLUGIN_CLASSES = {}
except ImportError:
    _LAZY_LOADER = False

if not _LAZY_LOADER:
    from .extractors import *
    # Collect every *IE class star-imported above; GenericIE is deliberately
    # excluded from this list.
    _ALL_CLASSES = [
        klass
        for name, klass in globals().items()
        if name.endswith('IE') and name != 'GenericIE'
    ]

    # User plugin extractors are prepended so they take priority.
    _PLUGIN_CLASSES = load_plugins('extractor', 'IE', globals())
    _ALL_CLASSES = list(_PLUGIN_CLASSES.values()) + _ALL_CLASSES
+
+
def gen_extractor_classes():
    """ Return a list of supported extractors.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    # NOTE: returns the shared module-level list object; callers must not
    # mutate it.
    return _ALL_CLASSES
+
+
def gen_extractors():
    """ Return a list of an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    instances = []
    for extractor_class in gen_extractor_classes():
        instances.append(extractor_class())
    return instances
+
+
def list_extractors(age_limit):
    """
    Return a list of extractors that are suitable for the given age,
    sorted by extractor ID.
    """
    suitable = (ie for ie in gen_extractors() if ie.is_suitable(age_limit))
    return sorted(suitable, key=lambda ie: ie.IE_NAME.lower())
+
+
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    # The extractor classes live in this module's namespace as '<Name>IE'.
    return globals()['%sIE' % ie_name]
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/extractor/comedycentral.py b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/comedycentral.py
new file mode 100644
index 0000000000..ad75356211
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/comedycentral.py
@@ -0,0 +1,14 @@
+from __future__ import unicode_literals
+
+from .mtv import MTVServicesInfoExtractor
+
+
class ComedyCentralIE(MTVServicesInfoExtractor):
    """Extractor for cc.com episode / video-clip / collection-playlist URLs.

    BUG FIX: the id capture group had been mangled to '(?P[0-9a-z]{6})'
    (the '<id>' group name was lost), which is invalid regex syntax and
    would make re.compile() fail; restored '(?P<id>...)' so _match_id()
    can extract the 6-character video id.
    """
    _VALID_URL = r'https?://(?:www\.)?cc\.com/(?:episodes|video(?:-clips)?|collection-playlist)/(?P<id>[0-9a-z]{6})'
    _FEED_URL = 'http://comedycentral.com/feeds/mrss/'
    # NOTE(review): _MGID presumably tells the MTV base class whether the
    # URL itself is an mgid — confirm against MTVServicesInfoExtractor.
    _MGID = False
+
+
class ComedyCentralMgidIE(ComedyCentralIE):
    """Extractor for raw 'mgid:arc:...:comedycentral.com:<uuid>' identifiers.

    BUG FIX: restored the named capture group '(?P<id>...)' (the '<id>' had
    been lost, leaving invalid '(?P[...' syntax) and escaped the dot in
    'comedycentral\\.com' so '.' is matched literally rather than as a
    wildcard.
    """
    _VALID_URL = r'mgid:arc:(?:video|episode|promo):comedycentral\.com:(?P<id>[a-z\d]{8}-[a-z\d]{4}-[a-z\d]{4}-[a-z\d]{4}-[a-z\d]{12})'
    _MGID = True
diff --git a/plugin.video.cc.com/resources/lib/yt_dlp/extractor/common.py b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/common.py
new file mode 100644
index 0000000000..cbe8b96e1e
--- /dev/null
+++ b/plugin.video.cc.com/resources/lib/yt_dlp/extractor/common.py
@@ -0,0 +1,1021 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import base64
+import hashlib
+import json
+import os
+import re
+import sys
+import time
+
+from ..compat import (
+ compat_etree_fromstring,
+ compat_http_client,
+ compat_os_name,
+ compat_str,
+ compat_urllib_error,
+ compat_urllib_request,
+ compat_urlparse,
+ compat_xml_parse_error,
+)
+
+from ..utils import (
+ compiled_regex_type,
+ determine_ext,
+ error_to_compat_str,
+ ExtractorError,
+ float_or_none,
+ format_field,
+ GeoRestrictedError,
+ network_exceptions,
+ NO_DEFAULT,
+ parse_codecs,
+ parse_m3u8_attributes,
+ RegexNotFoundError,
+ sanitize_filename,
+ sanitized_Request,
+ update_Request,
+ update_url_query,
+ variadic,
+)
+
+
+class InfoExtractor(object):
+ _ready = False
+ _downloader = None
+ _x_forwarded_for_ip = None
+ _GEO_BYPASS = True
+ _GEO_COUNTRIES = None
+ _GEO_IP_BLOCKS = None
+ _WORKING = True
+
+ _LOGIN_HINTS = {
+ 'any': 'Use --cookies, --username and --password or --netrc to provide account credentials',
+ 'cookies': (
+ 'Use --cookies for the authentication. '
+ 'See https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl for how to pass cookies'),
+ 'password': 'Use --username and --password or --netrc to provide account credentials',
+ }
+
    def __init__(self, downloader=None):
        """Constructor. Receives an optional downloader."""
        self._ready = False
        self._x_forwarded_for_ip = None
        # NOTE(review): presumably deduplicates printed messages; it is
        # reset again in initialize().
        self._printed_messages = set()
        self.set_downloader(downloader)
+
    @classmethod
    def _match_valid_url(cls, url):
        """Match *url* against cls._VALID_URL, caching the compiled regex
        on the class. Returns a re.Match, or None if the URL is unsuitable."""
        # This does not use has/getattr intentionally - we want to know whether
        # we have cached the regexp for *this* class, whereas getattr would also
        # match the superclass
        if '_VALID_URL_RE' not in cls.__dict__:
            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
        return cls._VALID_URL_RE.match(url)
+
    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        # This function must import everything it needs (except other extractors),
        # so that lazy_extractors works correctly
        # Returns an actual bool, never the Match object itself.
        return cls._match_valid_url(url) is not None
+
    @classmethod
    def _match_id(cls, url):
        # Extract the 'id' group from _VALID_URL. Raises AttributeError when
        # the URL does not match (match() returned None) and IndexError when
        # the pattern has no 'id' group — see get_temp_id().
        return cls._match_valid_url(url).group('id')
+
    @classmethod
    def get_temp_id(cls, url):
        """Best-effort id extraction: returns None instead of raising when
        the URL does not match or _VALID_URL has no 'id' group."""
        try:
            return cls._match_id(url)
        except (IndexError, AttributeError):
            return None
+
    @classmethod
    def working(cls):
        """Getter method for _WORKING (whether this extractor is marked
        as functional)."""
        return cls._WORKING
+
    def initialize(self):
        """Initializes an instance (authentication, etc).

        Idempotent: _real_initialize() runs at most once per instance.
        """
        self._printed_messages = set()
        if not self._ready:
            self._real_initialize()
            self._ready = True
+
    def extract(self, url):
        """Extracts URL information and returns it in list of dicts.

        On a GeoRestrictedError the extraction is retried once (after
        __maybe_fake_ip_and_retry() installs a faked X-Forwarded-For IP);
        lower-level failures are re-raised wrapped in ExtractorError.
        """
        try:
            for _ in range(2):
                try:
                    self.initialize()
                    self.write_debug('Extracting URL: %s' % url)
                    ie_result = self._real_extract(url)
                    if ie_result is None:
                        return None
                    if self._x_forwarded_for_ip:
                        # Propagate the faked IP so later requests use it too.
                        ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip
                    subtitles = ie_result.get('subtitles')
                    if (subtitles and 'live_chat' in subtitles
                            and 'no-live-chat' in self.get_param('compat_opts', [])):
                        del subtitles['live_chat']
                    return ie_result
                except GeoRestrictedError as e:
                    if self.__maybe_fake_ip_and_retry(e.countries):
                        continue
                    raise
        except ExtractorError as e:
            # Re-wrap so the error carries the video id and extractor name.
            video_id = e.video_id or self.get_temp_id(url)
            raise ExtractorError(
                e.msg, video_id=video_id, ie=self.IE_NAME, tb=e.traceback, expected=e.expected, cause=e.cause)
        except compat_http_client.IncompleteRead as e:
            raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
        except (KeyError, StopIteration) as e:
            raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
+
    def set_downloader(self, downloader):
        """Sets the downloader for this IE."""
        self._downloader = downloader
+
    def _real_initialize(self):
        """Real initialization process. Redefine in subclasses."""
        # Intentional no-op: subclasses override when they need setup work.
        pass
+
    def _real_extract(self, url):
        """Real extraction process. Redefine in subclasses."""
        # Intentional no-op in the base class.
        pass
+
    @classmethod
    def ie_key(cls):
        """A string for getting the InfoExtractor with get_info_extractor"""
        # The class name minus its trailing 'IE' suffix.
        return cls.__name__[:-2]
+
    @property
    def IE_NAME(self):
        # Instance-level counterpart of ie_key(): class name without 'IE'.
        return compat_str(type(self).__name__[:-2])
+
    @staticmethod
    def __can_accept_status_code(err, expected_status):
        """Whether HTTPError *err* carries a status the caller declared
        acceptable: expected_status may be None (never), a predicate, an
        int, or an iterable of ints."""
        assert isinstance(err, compat_urllib_error.HTTPError)
        if expected_status is None:
            return False
        elif callable(expected_status):
            # Predicate must return True exactly, not merely truthy.
            return expected_status(err.code) is True
        else:
            return err.code in variadic(expected_status)
+
    def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None):
        """
        Return the response handle.

        See _download_webpage docstring for arguments specification.
        """
        # Honour --sleep-requests between consecutive requests; skipped for
        # the very first request of the run.
        if not self._downloader._first_webpage_request:
            sleep_interval = float_or_none(self.get_param('sleep_interval_requests')) or 0
            if sleep_interval > 0:
                self.to_screen('Sleeping %s seconds ...' % sleep_interval)
                time.sleep(sleep_interval)
        else:
            self._downloader._first_webpage_request = False

        # note=None -> default message; note=False -> silent.
        if note is None:
            self.report_download_webpage(video_id)
        elif note is not False:
            if video_id is None:
                self.to_screen('%s' % (note,))
            else:
                self.to_screen('%s: %s' % (video_id, note))

        # Some sites check X-Forwarded-For HTTP header in order to figure out
        # the origin of the client behind proxy. This allows bypassing geo
        # restriction by faking this header's value to IP that belongs to some
        # geo unrestricted country. We will do so once we encounter any
        # geo restriction error.
        if self._x_forwarded_for_ip:
            if 'X-Forwarded-For' not in headers:
                headers['X-Forwarded-For'] = self._x_forwarded_for_ip

        if isinstance(url_or_request, compat_urllib_request.Request):
            url_or_request = update_Request(
                url_or_request, data=data, headers=headers, query=query)
        else:
            if query:
                url_or_request = update_url_query(url_or_request, query)
            if data is not None or headers:
                url_or_request = sanitized_Request(url_or_request, data, headers)
        try:
            return self._downloader.urlopen(url_or_request)
        except network_exceptions as err:
            if isinstance(err, compat_urllib_error.HTTPError):
                if self.__can_accept_status_code(err, expected_status):
                    # Retain reference to error to prevent file object from
                    # being closed before it can be read. Works around the
                    # effects of https://bugs.python.org/issue15002
                    # introduced in Python 3.4.1.
                    err.fp._error = err
                    return err.fp

            if errnote is False:
                return False
            if errnote is None:
                errnote = 'Unable to download webpage'

            errmsg = '%s: %s' % (errnote, error_to_compat_str(err))
            if fatal:
                raise ExtractorError(errmsg, sys.exc_info()[2], cause=err)
            else:
                self.report_warning(errmsg)
                return False
+
    def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None):
        """
        Return a tuple (page content as string, URL handle).

        See _download_webpage docstring for arguments specification.
        """
        # Strip hashes from the URL (#1038)
        if isinstance(url_or_request, (compat_str, str)):
            url_or_request = url_or_request.partition('#')[0]

        urlh = self._request_webpage(url_or_request, video_id, note, errnote, fatal, data=data, headers=headers, query=query, expected_status=expected_status)
        if urlh is False:
            # _request_webpage only returns False when fatal was False.
            assert not fatal
            return False
        content = self._webpage_read_content(urlh, url_or_request, video_id, note, errnote, fatal, encoding=encoding)
        return (content, urlh)
+
+ @staticmethod
+ def _guess_encoding_from_content(content_type, webpage_bytes):
+ m = re.match(r'[a-zA-Z0-9_.-]+/[a-zA-Z0-9_.-]+\s*;\s*charset=(.+)', content_type)
+ if m:
+ encoding = m.group(1)
+ else:
+ m = re.search(br']+charset=[\'"]?([^\'")]+)[ /\'">]',
+ webpage_bytes[:1024])
+ if m:
+ encoding = m.group(1).decode('ascii')
+ elif webpage_bytes.startswith(b'\xff\xfe'):
+ encoding = 'utf-16'
+ else:
+ encoding = 'utf-8'
+
+ return encoding
+
+ def __check_blocked(self, content):
+ first_block = content[:512]
+ if ('Access to this site is blocked' in content
+ and 'Websense' in first_block):
+ msg = 'Access to this webpage has been blocked by Websense filtering software in your network.'
+ blocked_iframe = self._html_search_regex(
+ r'