2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -13,6 +13,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).

- Fixed CVE-2023-44487 (HTTP/2 Rapid Reset) [PR #1417](https://github.com/3scale/apicast/pull/1417) [THREESCALE-10224](https://issues.redhat.com/browse/THREESCALE-10224)

- Fixed issue where the proxy policy could not handle requests with "Transfer-Encoding: chunked" header [PR #1403](https://github.com/3scale/APIcast/pull/1403) [THREESCALE-9542](https://issues.redhat.com/browse/THREESCALE-9542)

### Added

- Detect number of CPU shares when running on Cgroups V2 [PR #1410](https://github.com/3scale/apicast/pull/1410) [THREESCALE-10167](https://issues.redhat.com/browse/THREESCALE-10167)
148 changes: 121 additions & 27 deletions gateway/src/apicast/http_proxy.lua
@@ -1,14 +1,30 @@
local format = string.format
local tostring = tostring
local ngx_flush = ngx.flush
local ngx_get_method = ngx.req.get_method
local ngx_http_version = ngx.req.http_version
local ngx_send_headers = ngx.send_headers

local resty_url = require "resty.url"
local resty_resolver = require 'resty.resolver'
local round_robin = require 'resty.balancer.round_robin'
local http_proxy = require 'resty.http.proxy'
local file_reader = require("resty.file").file_reader
local file_size = require("resty.file").file_size
local client_body_reader = require("resty.http.request_reader").get_client_body_reader
local send_response = require("resty.http.response_writer").send_response
local concat = table.concat

local _M = { }

local http_methods_with_body = {
POST = true,
PUT = true,
PATCH = true
}

local DEFAULT_CHUNKSIZE = 32 * 1024

function _M.reset()
_M.balancer = round_robin.new()
_M.resolver = resty_resolver
@@ -82,15 +98,50 @@ local function absolute_url(uri)
)
end

local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_connect)
-- This is needed to call ngx.req.get_body_data() below.
ngx.req.read_body()
local function forward_https_request(proxy_uri, uri, proxy_opts)
local body, err
local sock
local opts = proxy_opts or {}
local req_method = ngx_get_method()
local encoding = ngx.req.get_headers()["Transfer-Encoding"]
local is_chunked = encoding and encoding:lower() == "chunked"
local content_type = ngx.req.get_headers()["Content-Type"]
local content_type_is_urlencoded = content_type and content_type:lower() == "application/x-www-form-urlencoded"
local raw = false

if http_methods_with_body[req_method] then

-- When the content type is "application/x-www-form-urlencoded" the body is always pre-read.
-- See: gateway/src/apicast/configuration/service.lua:214
--
-- Due to this, ngx.req.socket() will fail with a "request body already exists" error, or return a
-- socket that hangs on read in the case of a raw socket. Therefore, we only retrieve the body from
-- the socket if the content type is not "application/x-www-form-urlencoded".
if opts.request_unbuffered and ngx_http_version() == 1.1 and not content_type_is_urlencoded then
if is_chunked then
-- The default ngx reader does not support chunked request
-- so we will need to get the raw request socket and manually
-- decode the chunked request
sock, err = ngx.req.socket(true)
raw = true
else
sock, err = ngx.req.socket()
end

local request = {
uri = uri,
method = ngx.req.get_method(),
headers = ngx.req.get_headers(0, true),
path = format('%s%s%s', ngx.var.uri, ngx.var.is_args, ngx.var.query_string or ''),
if not sock then
ngx.log(ngx.ERR, "unable to obtain request socket: ", err)
return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end

body = client_body_reader(sock, DEFAULT_CHUNKSIZE, is_chunked)
else
-- TODO: Due to ngx.req.read_body(), the current implementation will not work with gRPC services.
-- See: https://github.com/3scale/APIcast/pull/1419
-- Should we get the body from the socket by default and only read the buffered body if
-- "Content-Type: application/x-www-form-urlencoded"?
--
-- This is needed to call ngx.req.get_body_data() below.
ngx.req.read_body()

-- We cannot use resty.http's .get_client_body_reader().
-- In POST requests with HTTPS, the result of that call is nil, and it
@@ -101,26 +152,55 @@ local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_conn
-- read and need to be cached in a local file. This request will return
-- nil, so after this we need to read the temp file.
-- https://github.com/openresty/lua-nginx-module#ngxreqget_body_data
body = ngx.req.get_body_data(),
proxy_uri = proxy_uri,
proxy_auth = proxy_auth
}
body = ngx.req.get_body_data()

if not body then
local temp_file_path = ngx.req.get_body_file()
ngx.log(ngx.INFO, "HTTPS Proxy: Request body is bigger than client_body_buffer_size, read the content from path='", temp_file_path, "'")

if temp_file_path then
body, err = file_reader(temp_file_path)
if err then
ngx.log(ngx.ERR, "HTTPS proxy: Failed to read temp body file, err: ", err)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end

if is_chunked then
-- If the body is smaller than "client_body_buffer_size", the Content-Length header is
-- set by OpenResty based on the size of the buffer. However, when the body is written
-- to a file, we need to calculate and manually set the Content-Length header based
-- on the file size.
local contentLength, err = file_size(temp_file_path)
Member:

Is this safe to do for ALL requests which meet these conditions? I see that the calls in the `file_size` function are I/O-blocking calls, so I am wondering how harmful to performance they could be, given that they are not executed within a coroutine. If a coroutine cannot be used, then we should consider using the lua-io-nginx module, for example.

Contributor (author):

Is it enough to wrap that functionality in a coroutine? I don't know how useful that would be, since it would yield on the first call anyway. Also, `file_reader` already calls `io.open` on every request that has its body buffered to a file, so I guess we pay the price of calling `io.open` one more time?

But I totally agree with you that it is an I/O-blocking function and should be avoided.

Checking the lua-io-nginx module, I can see that it is currently considered experimental. It also seems to run the task on another thread, but I'm not so sure about that approach because we have to pay for context switching, threads, locking, etc. From its documentation:

> It's worth to mention that the cost time of a single I/O operation won't be reduced, it was just transferred from the main thread (the one executes the event loop) to another exclusive thread. Indeed, the overhead might be a little higher, because of the extra tasks transferring, lock waiting, Lua coroutine resumption (and can only be resumed in the next event loop) and so forth. Nevertheless, after the offloading, the main thread doesn't block due to the I/O operation, and this is the fundamental advantage compared with the native Lua I/O library.

Member:

Not sure how expensive this is:

    function fsize(filename)
      local handle, err = open(filename)
      local current = handle:seek()      -- get current position
      local size = handle:seek("end")    -- get file size
      handle:seek("set", current)        -- restore position
      return size
    end

Theoretically, any I/O operation could block the thread. We could try coroutines or any other means to make it non-blocking. The lua-nginx-module introduction says:

> Disk operations with relatively small amount of data can be done using the standard Lua io library but huge file reading and writing should be avoided wherever possible as they may block the Nginx process significantly. Delegating all network and disk I/O operations to Nginx's subrequests (via the [ngx.location.capture](https://github.com/openresty/lua-nginx-module#ngxlocationcapture) method and similar) is strongly recommended for maximum performance.

Not sure if we can follow that recommendation, though. @tkan145 we can try to discuss this in a short call.

Anyway, AFAIK, we have never measured APICast's capacity to handle traffic with request bodies big enough to be persisted to disk. All the tests performed were "simple" GET requests.
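
For illustration only, a rough sketch of the offloading idea suggested above. It assumes the lua-io-nginx module is compiled into the gateway and that its file handles expose a seek() method mirroring the standard io API; neither assumption is verified in this PR, and the function name is hypothetical:

```lua
-- Hypothetical sketch: offload the file-size lookup to lua-io-nginx so the
-- worker's event loop is not blocked by disk I/O (API usage is assumed, not
-- taken from this PR).
local ngx_io = require "ngx.io"

local function file_size_offloaded(filename)
  local handle, err = ngx_io.open(filename, "r")
  if not handle then
    return nil, err
  end

  -- lua-io-nginx runs blocking file operations on a thread pool and yields
  -- the current request until they complete (assumed to cover seek() too).
  local size, seek_err = handle:seek("end")
  handle:close()

  if not size then
    return nil, seek_err
  end
  return size
end
```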

Contributor (author):

Fixed, and also updated the README file. @kevprice83 can you help review the README file and let me know if I need to add anything else?

if err then
ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end

ngx.req.set_header("Content-Length", tostring(contentLength))
end
end
end

if not request.body then
local temp_file_path = ngx.req.get_body_file()
ngx.log(ngx.INFO, "HTTPS Proxy: Request body is bigger than client_body_buffer_size, read the content from path='", temp_file_path, "'")

if temp_file_path then
local body, err = file_reader(temp_file_path)
if err then
ngx.log(ngx.ERR, "HTTPS proxy: Failed to read temp body file, err: ", err)
ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end
request.body = body
-- The whole request is buffered with the chunked encoding removed, so drop the
-- "Transfer-Encoding: chunked" header, otherwise the upstream will expect a
-- chunk-encoded body and fail to read it.
if is_chunked then
ngx.req.set_header("Transfer-Encoding", nil)
end
end
end

local httpc, err = http_proxy.new(request, skip_https_connect)
local request = {
uri = uri,
method = ngx.req.get_method(),
headers = ngx.req.get_headers(0, true),
path = format('%s%s%s', ngx.var.uri, ngx.var.is_args, ngx.var.query_string or ''),
body = body,
proxy_uri = proxy_uri,
proxy_auth = opts.proxy_auth
}

local httpc, err = http_proxy.new(request, opts.skip_https_connect)

if not httpc then
ngx.log(ngx.ERR, 'could not connect to proxy: ', proxy_uri, ' err: ', err)
@@ -132,8 +212,16 @@ local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_conn
res, err = httpc:request(request)

if res then
httpc:proxy_response(res)
httpc:set_keepalive()
if opts.request_unbuffered and raw then
local bytes, err = send_response(sock, res, DEFAULT_CHUNKSIZE)
if not bytes then
ngx.log(ngx.ERR, "failed to send response: ", err)
return sock:send("HTTP/1.1 502 Bad Gateway")
end
else
httpc:proxy_response(res)
httpc:set_keepalive()
end
else
ngx.log(ngx.ERR, 'failed to proxy request to: ', proxy_uri, ' err : ', err)
return ngx.exit(ngx.HTTP_BAD_GATEWAY)
@@ -186,7 +274,13 @@ function _M.request(upstream, proxy_uri)
return
elseif uri.scheme == 'https' then
upstream:rewrite_request()
forward_https_request(proxy_uri, proxy_auth, uri, upstream.skip_https_connect)
local proxy_opts = {
proxy_auth = proxy_auth,
skip_https_connect = upstream.skip_https_connect,
request_unbuffered = upstream.request_unbuffered
}

forward_https_request(proxy_uri, uri, proxy_opts)
return ngx.exit(ngx.OK) -- terminate phase
else
ngx.log(ngx.ERR, 'could not connect to proxy: ', proxy_uri, ' err: ', 'invalid request scheme')
5 changes: 5 additions & 0 deletions gateway/src/apicast/policy/request_unbuffered/README.md
@@ -12,3 +12,8 @@ This policy allows to disable request buffering
}
```

## Caveats

- Because APIcast allows defining mapping rules based on request content, POST requests with
  `Content-Type: application/x-www-form-urlencoded` will always be buffered, regardless of whether the
  `request_unbuffered` policy is enabled.
1 change: 1 addition & 0 deletions gateway/src/apicast/upstream.lua
@@ -241,6 +241,7 @@ function _M:call(context)
self:set_skip_https_connect_on_proxy();
end

self.request_unbuffered = context.request_unbuffered
http_proxy.request(self, proxy_uri)
else
local err = self:rewrite_request()
16 changes: 16 additions & 0 deletions gateway/src/resty/file.lua
@@ -28,4 +28,20 @@ function _M.file_reader(filename)
end)
end

function _M.file_size(filename)
local handle, err = open(filename)

if err then
return nil, err
end

local current = handle:seek()
local size = handle:seek("end")

handle:seek("set", current)
handle:close()

return size
end

return _M
73 changes: 73 additions & 0 deletions gateway/src/resty/http/request_reader.lua
@@ -0,0 +1,73 @@
local httpc = require "resty.resolver.http"
local ngx_req = ngx.req

local _M = {
}

local cr_lf = "\r\n"

local function test_expect(sock)
local expect = ngx_req.get_headers()["Expect"]

if expect == "" or ngx_req.http_version == 1.0 then
return true
end

if expect and expect:lower() == "100-continue" then
local _, err = sock:send("HTTP/1.1 100 Continue\r\n\r\n")
if err then
ngx.log(ngx.ERR, "failed to handle expect header, err: ", err)
return false, err
end
end
return true
end

-- chunked_reader returns a body reader that translates the data read from
-- lua-resty-http's client_body_reader into HTTP "chunked" format before returning it.
--
-- The chunked reader returns nil once the final 0-length chunk has been read.
local function chunked_reader(sock, chunksize)
chunksize = chunksize or 65536
local eof = false
local reader = httpc:get_client_body_reader(chunksize, sock)
if not reader then
return nil
end

-- If Expect: 100-continue is sent upstream, lua-resty-http will only call
-- _send_body after receiving "100 Continue". So it's safe to process the
-- Expect header and send "100 Continue" downstream here.
local ok, err = test_expect(sock)
if not ok then
return nil, err
end

return function()
if eof then
return nil
end

local buffer, err = reader()
if err then
return nil, err
end
if buffer then
local chunk = string.format("%x\r\n", #buffer) .. buffer .. cr_lf
return chunk
else
eof = true
return "0\r\n\r\n"
end
end
end

function _M.get_client_body_reader(sock, chunksize, is_chunked)
if is_chunked then
return chunked_reader(sock, chunksize)
else
return httpc:get_client_body_reader(chunksize, sock)
end
end

return _M
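
As a usage illustration (hypothetical caller, not part of this PR), the reader produced by this module can be handed to lua-resty-http, which accepts an iterator function as a request body, so the client body is streamed upstream without being buffered:

```lua
-- Hypothetical example: stream a chunked client body upstream without buffering it.
local request_reader = require "resty.http.request_reader"

local sock, err = ngx.req.socket(true)   -- raw socket, required for chunked bodies
if not sock then
  ngx.log(ngx.ERR, "unable to obtain request socket: ", err)
  return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR)
end

-- 32 KB read buffer; true = the client sent "Transfer-Encoding: chunked"
local body_reader = request_reader.get_client_body_reader(sock, 32 * 1024, true)

-- The reader yields data already in chunked wire format, so it can be passed
-- directly as the body of a lua-resty-http request, for example:
--   httpc:request({ method = "POST", path = "/upload",
--                   headers = ngx.req.get_headers(0, true), body = body_reader })
```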