From 3efa69a19f54711f116e7193e2e4d9287dceeab1 Mon Sep 17 00:00:00 2001 From: An Tran Date: Mon, 27 Nov 2023 21:23:01 +1000 Subject: [PATCH 1/8] Fix 'Transfer-Encoding: chunked' issue when sending request via proxy When a request with the HTTP "Transfer-Encoding: chunked" header is sent, APIcast buffers the entire request because by default it does not support sending chunked requests. However, when sending via proxy, APIcast does not remove the header sent in the initial request, which tells the server that the client is sending a chunk request. This then causes an Bad Request error because the upstream will not be able to determine the end of the chunk from the request. This commit removes the "Transfer-Encoding: chunked" header from the request when sending through a proxy. --- gateway/src/apicast/http_proxy.lua | 68 +++-- gateway/src/resty/file.lua | 16 + t/apicast-policy-camel.t | 236 ++++++++++++++ t/apicast-policy-http-proxy.t | 212 +++++++++++++ t/http-proxy.t | 475 +++++++++++++++++++++++++++++ 5 files changed, 984 insertions(+), 23 deletions(-) diff --git a/gateway/src/apicast/http_proxy.lua b/gateway/src/apicast/http_proxy.lua index 624aa0502..4293135c5 100644 --- a/gateway/src/apicast/http_proxy.lua +++ b/gateway/src/apicast/http_proxy.lua @@ -1,10 +1,12 @@ local format = string.format +local tostring = tostring local resty_url = require "resty.url" local resty_resolver = require 'resty.resolver' local round_robin = require 'resty.balancer.round_robin' local http_proxy = require 'resty.http.proxy' local file_reader = require("resty.file").file_reader +local file_size = require("resty.file").file_size local concat = table.concat local _M = { } @@ -86,40 +88,60 @@ local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_conn -- This is needed to call ngx.req.get_body_data() below. 
ngx.req.read_body() - local request = { - uri = uri, - method = ngx.req.get_method(), - headers = ngx.req.get_headers(0, true), - path = format('%s%s%s', ngx.var.uri, ngx.var.is_args, ngx.var.query_string or ''), - - -- We cannot use resty.http's .get_client_body_reader(). - -- In POST requests with HTTPS, the result of that call is nil, and it - -- results in a time-out. - -- - -- - -- If ngx.req.get_body_data is nil, can be that the body is too big to - -- read and need to be cached in a local file. This request will return - -- nil, so after this we need to read the temp file. - -- https://github.com/openresty/lua-nginx-module#ngxreqget_body_data - body = ngx.req.get_body_data(), - proxy_uri = proxy_uri, - proxy_auth = proxy_auth - } - - if not request.body then + -- We cannot use resty.http's .get_client_body_reader(). + -- In POST requests with HTTPS, the result of that call is nil, and it + -- results in a time-out. + -- + -- + -- If ngx.req.get_body_data is nil, can be that the body is too big to + -- read and need to be cached in a local file. This request will return + -- nil, so after this we need to read the temp file. + -- https://github.com/openresty/lua-nginx-module#ngxreqget_body_data + local body = ngx.req.get_body_data() + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + + if not body then local temp_file_path = ngx.req.get_body_file() ngx.log(ngx.INFO, "HTTPS Proxy: Request body is bigger than client_body_buffer_size, read the content from path='", temp_file_path, "'") if temp_file_path then - local body, err = file_reader(temp_file_path) + body, err = file_reader(temp_file_path) if err then ngx.log(ngx.ERR, "HTTPS proxy: Failed to read temp body file, err: ", err) ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) end - request.body = body + + if encoding == "chunked" then + -- If the body is smaller than "client_boby_buffer_size" the Content-Length header is + -- set based on the size of the buffer. 
However, when the body is rendered to a file, + -- we will need to calculate and manually set the Content-Length header based on the + -- file size + local contentLength, err = file_size(temp_file_path) + if err then + ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err) + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + end + + ngx.req.set_header("Content-Length", tostring(contentLength)) + end end end + -- The whole request is buffered in the memory so remove the Transfer-Encoding: chunked + if ngx.var.http_transfer_encoding == "chunked" then + ngx.req.set_header("Transfer-Encoding", nil) + end + + local request = { + uri = uri, + method = ngx.req.get_method(), + headers = ngx.req.get_headers(0, true), + path = format('%s%s%s', ngx.var.uri, ngx.var.is_args, ngx.var.query_string or ''), + body = body, + proxy_uri = proxy_uri, + proxy_auth = proxy_auth + } + local httpc, err = http_proxy.new(request, skip_https_connect) if not httpc then diff --git a/gateway/src/resty/file.lua b/gateway/src/resty/file.lua index 40b3a7f0c..592c61059 100644 --- a/gateway/src/resty/file.lua +++ b/gateway/src/resty/file.lua @@ -28,4 +28,20 @@ function _M.file_reader(filename) end) end +function _M.file_size(filename) + local handle, err = open(filename) + + if err then + return nil, err + end + + local current = handle:seek() + local size = handle:seek("end") + + handle:seek("set", current) + handle:close() + + return size +end + return _M diff --git a/t/apicast-policy-camel.t b/t/apicast-policy-camel.t index ddaac389c..2848b4ee8 100644 --- a/t/apicast-policy-camel.t +++ b/t/apicast-policy-camel.t @@ -506,3 +506,239 @@ using proxy: http://foo:bar\@127.0.0.1:$Test::Nginx::Util::PROXY_SSL_PORT, EOF --- no_error_log eval [qr/\[error\]/, qr/\got header line: Proxy-Authorization: Basic Zm9vOmJhcg==/] + + + +=== TEST 8: API backend connection uses http proxy with chunked request +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + 
"backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.camel", + "configuration": { + "http_proxy": "$TEST_NGINX_HTTP_PROXY" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] + + + +=== TEST 9: API backend using all_proxy with chunked request +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.http_proxy", + "configuration": { + "all_proxy": "$TEST_NGINX_HTTP_PROXY" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + local expected = 
"service_token=token-value&service_id=42&usage%5Bhits%5D=2&user_key=value" + require('luassert').same(ngx.decode_args(expected), ngx.req.get_uri_args(0)) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] + + + +=== TEST 10: using HTTPS proxy for backend with chunked request +--- init eval +$Test::Nginx::Util::PROXY_SSL_PORT = Test::APIcast::get_random_port(); +$Test::Nginx::Util::ENDPOINT_SSL_PORT = Test::APIcast::get_random_port(); +--- configuration random_port env eval +< $ENV{TEST_NGINX_HTTP_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "http://test_backend.lvh.me:$ENV{TEST_NGINX_SERVER_PORT}" +) +--- configuration +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ] + } + } + ] +} +--- backend +server_name test_backend.lvh.me; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream +server_name test-upstream.lvh.me; + location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; + } +--- more_headers 
+Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: POST http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/test?user_key=value HTTP/1.1 +--- no_error_log +[error] + + + +=== TEST 26: Upstream API with HTTPS POST chunked request, HTTPS_PROXY and HTTPS api_backend +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; +} +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT 
HTTP/1.1 +--- no_error_log +[error] +--- user_files fixture=tls.pl eval + + + +=== TEST 27: Upstream Policy connection uses proxy with POST chunked request +--- env random_port eval +("http_proxy" => $ENV{TEST_NGINX_HTTP_PROXY}) +--- configuration +{ + "services": [ + { + "proxy": { + "policy_chain": [ + { "name": "apicast.policy.upstream", + "configuration": + { + "rules": [ { "regex": "/test", "url": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT" } ] + } + } + ] + } + } + ] +} +--- upstream +server_name test-upstream.lvh.me; + location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: POST http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/test?user_key=value HTTP/1.1 +--- no_error_log +[error] + + + +=== TEST 28: Upstream Policy connection uses proxy for https with POST chunked request +--- env eval +("https_proxy" => $ENV{TEST_NGINX_HTTPS_PROXY}) +--- configuration random_port env +{ + "services": [ + { + "proxy": { + "policy_chain": [ + { "name": "apicast.policy.upstream", + "configuration": + { + "rules": [ { "regex": "/test", "url": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT" } ] + } + } + ] + } + } + ] +} +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = 
ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; +} +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- user_files fixture=tls.pl eval + + + +=== TEST 29: Upstream policy with HTTPS POST chunked request, HTTPS_PROXY and HTTPS backend +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { "name": "apicast.policy.upstream", + "configuration": + { + "rules": [ { "regex": "/test", "url": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT" } ] + } + }, + { + "name": "apicast.policy.apicast" + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = 
ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; +} +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- user_files fixture=tls.pl eval + + + +=== TEST 30: Upstream policy with HTTPS POST chunked request, HTTPS_PROXY and HTTPS backend and lower +case header +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { "name": "apicast.policy.upstream", + "configuration": + { + "rules": [ { "regex": "/test", "url": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT" } ] + } + }, + { + "name": "apicast.policy.apicast" + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local 
encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('12', content_length) + assert.falsy(encoding) + } + echo_read_request_body; + echo $request_body; +} +--- more_headers +transfer-encoding: chunked +--- request eval +"POST /test?user_key=value +7\r +hello, \r +5\r +world\r +0\r +\r +" +--- response_body +hello, world +--- error_code: 200 +--- error_log env +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- user_files fixture=tls.pl eval + + + +=== TEST 31: POST chunked request with random chunk size, HTTPS_PROXY and HTTPS api_backend +this will also test the case when the request body is big and saved to a temp file +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; + +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + +location / { + echo_read_request_body; + echo_request_body; +} + +--- more_headers +Transfer-Encoding: chunked +--- request eval +$::data = ''; +my $count = (int rand 32766) + 1; +for (my $i = 0; $i < $count; $i++) { + my $c = chr int rand 128; + $::data .= $c; +} +my $s = "POST /test?user_key=value +". 
+sprintf("%x\r\n", length $::data). +$::data +."\r +0\r +\r +"; +open my $out, '>/tmp/out.txt' or die $!; +print $out $s; +close $out; +$s +--- response_body eval +$::data +--- error_log env +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- user_files fixture=tls.pl eval From b02a8883f5d8a8deabc5ad208fdc9ce638655047 Mon Sep 17 00:00:00 2001 From: An Tran Date: Mon, 27 Nov 2023 21:23:07 +1000 Subject: [PATCH 2/8] Support chunked requests when talking to proxy servers with request buffering disabled --- gateway/src/apicast/http_proxy.lua | 171 ++++++-- gateway/src/apicast/upstream.lua | 1 + gateway/src/resty/http/request_reader.lua | 47 ++ gateway/src/resty/http/response_writer.lua | 58 +++ t/apicast-policy-camel.t | 480 +++++++++++++++++++++ t/apicast-policy-http-proxy.t | 439 +++++++++++++++++++ t/http-proxy.t | 288 +++++++++++++ 7 files changed, 1439 insertions(+), 45 deletions(-) create mode 100644 gateway/src/resty/http/request_reader.lua create mode 100644 gateway/src/resty/http/response_writer.lua diff --git a/gateway/src/apicast/http_proxy.lua b/gateway/src/apicast/http_proxy.lua index 4293135c5..abbd9c634 100644 --- a/gateway/src/apicast/http_proxy.lua +++ b/gateway/src/apicast/http_proxy.lua @@ -1,5 +1,9 @@ local format = string.format local tostring = tostring +local ngx_flush = ngx.flush +local ngx_get_method = ngx.req.get_method +local ngx_http_version = ngx.req.http_version +local ngx_send_headers = ngx.send_headers local resty_url = require "resty.url" local resty_resolver = require 'resty.resolver' @@ -7,10 +11,20 @@ local round_robin = require 'resty.balancer.round_robin' local http_proxy = require 'resty.http.proxy' local file_reader = require("resty.file").file_reader local file_size = require("resty.file").file_size +local client_body_reader = require("resty.http.request_reader").get_client_body_reader +local send_response = require("resty.http.response_writer").send_response local concat 
= table.concat local _M = { } +local http_methods_with_body = { + POST = true, + PUT = true, + PATCH = true +} + +local DEFAULT_CHUNKSIZE = 32 * 1024 + function _M.reset() _M.balancer = round_robin.new() _M.resolver = resty_resolver @@ -84,52 +98,105 @@ local function absolute_url(uri) ) end -local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_connect) - -- This is needed to call ngx.req.get_body_data() below. - ngx.req.read_body() - - -- We cannot use resty.http's .get_client_body_reader(). - -- In POST requests with HTTPS, the result of that call is nil, and it - -- results in a time-out. - -- - -- - -- If ngx.req.get_body_data is nil, can be that the body is too big to - -- read and need to be cached in a local file. This request will return - -- nil, so after this we need to read the temp file. - -- https://github.com/openresty/lua-nginx-module#ngxreqget_body_data - local body = ngx.req.get_body_data() +local function handle_expect() + local expect = ngx.req.get_headers()["Expect"] + if type(expect) == "table" then + expect = expect[1] + end + + if expect and expect:lower() == "100-continue" then + ngx.status = 100 + local ok, err = ngx_send_headers() + + if not ok then + return nil, "failed to send response header: " .. (err or "unknown") + end + + ok, err = ngx_flush(true) + if not ok then + return nil, "failed to flush response header: " .. 
(err or "unknown") + end + end +end + +local function forward_https_request(proxy_uri, uri, proxy_opts) + local body, err + local sock + local opts = proxy_opts or {} + local req_method = ngx_get_method() local encoding = ngx.req.get_headers()["Transfer-Encoding"] + local is_chunked = encoding and encoding:lower() == "chunked" + + if http_methods_with_body[req_method] then + if opts.request_unbuffered and ngx_http_version() == 1.1 then + local _, err = handle_expect() + if err then + ngx.log(ngx.ERR, "failed to handle expect header, err: ", err) + return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + end - if not body then - local temp_file_path = ngx.req.get_body_file() - ngx.log(ngx.INFO, "HTTPS Proxy: Request body is bigger than client_body_buffer_size, read the content from path='", temp_file_path, "'") - - if temp_file_path then - body, err = file_reader(temp_file_path) - if err then - ngx.log(ngx.ERR, "HTTPS proxy: Failed to read temp body file, err: ", err) - ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) - end - - if encoding == "chunked" then - -- If the body is smaller than "client_boby_buffer_size" the Content-Length header is - -- set based on the size of the buffer. 
However, when the body is rendered to a file, - -- we will need to calculate and manually set the Content-Length header based on the - -- file size - local contentLength, err = file_size(temp_file_path) - if err then - ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err) + if is_chunked then + -- The default ngx reader does not support chunked request + -- so we will need to get the raw request socket and manually + -- decode the chunked request + sock, err = ngx.req.socket(true) + else + sock, err = ngx.req.socket() + end + + if not sock then + ngx.log(ngx.ERR, "unable to obtain request socket: ", err) + return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + end + + body = client_body_reader(sock, DEFAULT_CHUNKSIZE, is_chunked) + else + -- This is needed to call ngx.req.get_body_data() below. + ngx.req.read_body() + + -- We cannot use resty.http's .get_client_body_reader(). + -- In POST requests with HTTPS, the result of that call is nil, and it + -- results in a time-out. + -- + -- + -- If ngx.req.get_body_data is nil, can be that the body is too big to + -- read and need to be cached in a local file. This request will return + -- nil, so after this we need to read the temp file. + -- https://github.com/openresty/lua-nginx-module#ngxreqget_body_data + body = ngx.req.get_body_data() + + if not body then + local temp_file_path = ngx.req.get_body_file() + ngx.log(ngx.INFO, "HTTPS Proxy: Request body is bigger than client_body_buffer_size, read the content from path='", temp_file_path, "'") + + if temp_file_path then + body, err = file_reader(temp_file_path) + if err then + ngx.log(ngx.ERR, "HTTPS proxy: Failed to read temp body file, err: ", err) ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + end + + if is_chunked then + -- If the body is smaller than "client_boby_buffer_size" the Content-Length header is + -- set based on the size of the buffer. 
However, when the body is rendered to a file, + -- we will need to calculate and manually set the Content-Length header based on the + -- file size + local contentLength, err = file_size(temp_file_path) + if err then + ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err) + ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) + end + + ngx.req.set_header("Content-Length", tostring(contentLength)) + end end - - ngx.req.set_header("Content-Length", tostring(contentLength)) - end end - end - -- The whole request is buffered in the memory so remove the Transfer-Encoding: chunked - if ngx.var.http_transfer_encoding == "chunked" then - ngx.req.set_header("Transfer-Encoding", nil) + -- The whole request is buffered in the memory so remove the Transfer-Encoding: chunked + if is_chunked then + ngx.req.set_header("Transfer-Encoding", nil) + end + end end local request = { @@ -139,10 +206,10 @@ local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_conn path = format('%s%s%s', ngx.var.uri, ngx.var.is_args, ngx.var.query_string or ''), body = body, proxy_uri = proxy_uri, - proxy_auth = proxy_auth + proxy_auth = opts.proxy_auth } - local httpc, err = http_proxy.new(request, skip_https_connect) + local httpc, err = http_proxy.new(request, opts.skip_https_connect) if not httpc then ngx.log(ngx.ERR, 'could not connect to proxy: ', proxy_uri, ' err: ', err) @@ -154,8 +221,16 @@ local function forward_https_request(proxy_uri, proxy_auth, uri, skip_https_conn res, err = httpc:request(request) if res then - httpc:proxy_response(res) - httpc:set_keepalive() + if opts.request_unbuffered and is_chunked then + local bytes, err = send_response(sock, res, DEFAULT_CHUNKSIZE) + if not bytes then + ngx.log(ngx.ERR, "failed to send response: ", err) + return sock:send("HTTP/1.1 502 Bad Gateway") + end + else + httpc:proxy_response(res) + httpc:set_keepalive() + end else ngx.log(ngx.ERR, 'failed to proxy request to: ', proxy_uri, ' err : ', err) return 
ngx.exit(ngx.HTTP_BAD_GATEWAY) @@ -208,7 +283,13 @@ function _M.request(upstream, proxy_uri) return elseif uri.scheme == 'https' then upstream:rewrite_request() - forward_https_request(proxy_uri, proxy_auth, uri, upstream.skip_https_connect) + local proxy_opts = { + proxy_auth = proxy_auth, + skip_https_connect = upstream.skip_https_connect, + request_unbuffered = upstream.request_unbuffered + } + + forward_https_request(proxy_uri, uri, proxy_opts) return ngx.exit(ngx.OK) -- terminate phase else ngx.log(ngx.ERR, 'could not connect to proxy: ', proxy_uri, ' err: ', 'invalid request scheme') diff --git a/gateway/src/apicast/upstream.lua b/gateway/src/apicast/upstream.lua index 0aff47359..43c395c68 100644 --- a/gateway/src/apicast/upstream.lua +++ b/gateway/src/apicast/upstream.lua @@ -241,6 +241,7 @@ function _M:call(context) self:set_skip_https_connect_on_proxy(); end + self.request_unbuffered = context.request_unbuffered http_proxy.request(self, proxy_uri) else local err = self:rewrite_request() diff --git a/gateway/src/resty/http/request_reader.lua b/gateway/src/resty/http/request_reader.lua new file mode 100644 index 000000000..6b79e82d2 --- /dev/null +++ b/gateway/src/resty/http/request_reader.lua @@ -0,0 +1,47 @@ +local httpc = require "resty.resolver.http" + +local _M = { +} + +local cr_lf = "\r\n" + +-- chunked_reader return a body reader that translates the data read from +-- lua-resty-http client_body_reader to HTTP "chunked" format before returning it +-- +-- The chunked reader return nil when the final 0-length chunk is read +local function chunked_reader(sock, chunksize) + chunksize = chunksize or 65536 + local eof = false + local reader = httpc:get_client_body_reader(chunksize, sock) + if not reader then + return nil + end + + return function() + if eof then + return nil + end + + local buffer, err = reader() + if err then + return nil, err + end + if buffer then + local chunk = string.format("%x\r\n", #buffer) .. buffer .. 
cr_lf + return chunk + else + eof = true + return "0\r\n\r\n" + end + end +end + +function _M.get_client_body_reader(sock, chunksize, is_chunked) + if is_chunked then + return chunked_reader(sock, chunksize) + else + return httpc:get_client_body_reader(chunksize, sock) + end +end + +return _M diff --git a/gateway/src/resty/http/response_writer.lua b/gateway/src/resty/http/response_writer.lua new file mode 100644 index 000000000..9ab849676 --- /dev/null +++ b/gateway/src/resty/http/response_writer.lua @@ -0,0 +1,58 @@ +local _M = { +} + +local cr_lf = "\r\n" + +local function send(socket, data) + if not data or data == '' then + ngx.log(ngx.DEBUG, 'skipping sending nil') + return + end + + return socket:send(data) +end + +-- write_response writes response body reader to sock in the HTTP/1.x server response format, +-- The connection is closed if send() fails or when returning a non-zero +function _M.send_response(sock, response, chunksize) + local bytes, err + chunksize = chunksize or 65536 + + if not response then + ngx.log(ngx.ERR, "no response provided") + return + end + + if not sock then + return nil, "socket not initialized yet" + end + + -- Status line + local status = "HTTP/1.1 " .. response.status .. " " .. response.reason .. cr_lf + bytes, err = send(sock, status) + if not bytes then + return nil, "failed to send status line, err: " .. (err or "unknown") + end + + -- Write body + local reader = response.body_reader + repeat + local chunk, read_err + + chunk, read_err = reader(chunksize) + if read_err then + return nil, "failed to read response body, err: " .. (err or "unknown") + end + + if chunk then + bytes, err = send(sock, chunk) + if not bytes then + return nil, "failed to send response body, err: " .. 
(err or "unknown") + end + end + until not chunk + + return true, nil +end + +return _M diff --git a/t/apicast-policy-camel.t b/t/apicast-policy-camel.t index 2848b4ee8..6577e6391 100644 --- a/t/apicast-policy-camel.t +++ b/t/apicast-policy-camel.t @@ -3,6 +3,16 @@ use Test::APIcast::Blackbox 'no_plan'; require("http_proxy.pl"); +sub large_body { + my $res = ""; + for (my $i=0; $i <= 1024; $i++) { + $res = $res . "1111111 1111111 1111111 1111111\n"; + } + return $res; +} + +$ENV{'LARGE_BODY'} = large_body(); + repeat_each(1); run_tests(); @@ -742,3 +752,473 @@ EOF --- no_error_log [error] --- user_files fixture=tls.pl eval + + + +=== TEST 11: http_proxy with request_unbuffered policy +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.camel", + "configuration": { + "http_proxy": "http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + echo_read_request_body; + echo_request_body; + } +--- request eval +"POST /?user_key= \n" . 
$ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 12: http_proxy with request_unbuffered policy and chunked body +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.camel", + "configuration": { + "http_proxy": "http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + echo_request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /?user_key=value +". +sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). 
+$ENV{LARGE_BODY} +."\r +0\r +\r +" +--- response_body eval +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 13: all_proxy with request_unbuffered policy +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.camel", + "configuration": { + "all_proxy": "http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + echo_read_request_body; + echo_request_body; + } +--- request eval +"POST /?user_key= \n" . 
$ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 14: all_proxy with request_unbuffered policy and chunked body +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.camel", + "configuration": { + "all_proxy": "http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream + server_name test-upstream.lvh.me; + location / { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + echo_request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /?user_key=value +". +sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). 
+$ENV{LARGE_BODY} +."\r +0\r +\r +" +--- response_body eval +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: http://127.0.0.1:$TEST_NGINX_HTTP_PROXY_PORT +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 15: https_proxy with request_unbuffered policy, only upstream and proxy_pass will buffer +the request +--- init eval +$Test::Nginx::Util::PROXY_SSL_PORT = Test::APIcast::get_random_port(); +$Test::Nginx::Util::ENDPOINT_SSL_PORT = Test::APIcast::get_random_port(); +--- configuration random_port env eval +</tmp/out.txt' or die $!; +print $out $s; +close $out; +$s +--- response_body eval +$ENV{"LARGE_BODY"} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 12: all_proxy with request_unbuffered policy +--- configuration +{ + "services": [ + { + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.http_proxy", + "configuration": { + "all_proxy": "$TEST_NGINX_HTTP_PROXY" + } + } + ] + } + } + ] +} +--- backend +server_name test_backend.lvh.me; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream +server_name test-upstream.lvh.me; + location /test { + echo_read_request_body; + echo_request_body; + } +--- request eval +"POST 
/test?user_key= \n" . $ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 13: all_proxy with request_unbuffered policy + chunked request +--- configuration +{ + "services": [ + { + "id": 42, + "backend_version": 1, + "backend_authentication_type": "service_token", + "backend_authentication_value": "token-value", + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT/", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.http_proxy", + "configuration": { + "all_proxy": "$TEST_NGINX_HTTP_PROXY" + } + } + ] + } + } + ] +} +--- backend + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream +server_name test-upstream.lvh.me; + location / { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + echo_request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +my $s = "POST /test?user_key=value +". +sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). 
+$ENV{LARGE_BODY} +."\r +0\r +\r +"; +open my $out, '>/tmp/out.txt' or die $!; +print $out $s; +close $out; +$s +--- response_body eval +$ENV{"LARGE_BODY"} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 14: https_proxy with request_unbuffered policy +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.http_proxy", + "configuration": { + "https_proxy": "$TEST_NGINX_HTTPS_PROXY" + } + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + echo_read_request_body; + echo_request_body; +} +--- request eval +"POST /test?user_key= \n" . 
$ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTPS_PROXY +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file +--- user_files fixture=tls.pl eval + + + +=== TEST 15: https_proxy with request_unbuffered policy +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast.policy.apicast" + }, + { + "name": "apicast.policy.http_proxy", + "configuration": { + "https_proxy": "$TEST_NGINX_HTTPS_PROXY" + } + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + echo_request_body; +} +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +". 
+sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). +$ENV{LARGE_BODY} +."\r +0\r +\r +" +--- response_body eval +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTPS_PROXY +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file +--- user_files fixture=tls.pl eval diff --git a/t/http-proxy.t b/t/http-proxy.t index 2710512f6..de623f152 100644 --- a/t/http-proxy.t +++ b/t/http-proxy.t @@ -1795,3 +1795,291 @@ proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 --- no_error_log [error] --- user_files fixture=tls.pl eval + + + +=== TEST 32: HTTP_PROXY with request_unbuffered policy, only upstream server will buffer the +request +--- env eval +( + "http_proxy" => $ENV{TEST_NGINX_HTTP_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "http://test_backend.lvh.me:$ENV{TEST_NGINX_SERVER_PORT}" +) +--- configuration +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast", + "version": "builtin", + "configuration": {} + } + ] + } + } + ] +} +--- backend +server_name test_backend.lvh.me; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream +server_name test-upstream.lvh.me; + location /test { + echo_read_request_body; + echo_request_body; + } +--- request eval +"POST /test?user_key= \n" . 
$ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 33: HTTP_PROXY with request_unbuffered policy and chunked body, only upstream server will buffer the +request +--- env eval +( + "http_proxy" => $ENV{TEST_NGINX_HTTP_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "http://test_backend.lvh.me:$ENV{TEST_NGINX_SERVER_PORT}" +) +--- configuration +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "http://test-upstream.lvh.me:$TEST_NGINX_SERVER_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast", + "version": "builtin", + "configuration": {} + } + ] + } + } + ] +} +--- backend +server_name test_backend.lvh.me; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream +server_name test-upstream.lvh.me; + location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + echo_request_body; + } +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +". +sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). 
+$ENV{LARGE_BODY} +."\r +0\r +\r +" +--- response_body eval +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTP_PROXY +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file + + + +=== TEST 34: HTTPS_PROXY with request_unbuffered policy, only the upstream server will buffer +the request +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast", + "version": "builtin", + "configuration": {} + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + echo_read_request_body; + echo_request_body; +} +--- request eval +"POST /test?user_key= \n" . 
$ENV{LARGE_BODY} +--- response_body eval chomp +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTPS_PROXY +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file +--- user_files fixture=tls.pl eval + + + +=== TEST 35: HTTPS_PROXY with request_unbuffered policy, only the upstream server will buffer +the request +--- env random_port eval +( + 'https_proxy' => $ENV{TEST_NGINX_HTTPS_PROXY}, + 'BACKEND_ENDPOINT_OVERRIDE' => "https://test-backend.lvh.me:$ENV{TEST_NGINX_RANDOM_PORT}" +) +--- configuration random_port env +{ + "services": [ + { + "backend_version": 1, + "proxy": { + "api_backend": "https://test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT", + "proxy_rules": [ + { "pattern": "/test", "http_method": "POST", "metric_system_name": "hits", "delta": 2 } + ], + "policy_chain": [ + { + "name": "request_unbuffered" + }, + { + "name": "apicast", + "version": "builtin", + "configuration": {} + } + ] + } + } + ] +} +--- backend env + server_name test-backend.lvh.me; + listen $TEST_NGINX_RANDOM_PORT ssl; + ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; + ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; + location /transactions/authrep.xml { + content_by_lua_block { + ngx.exit(ngx.OK) + } + } +--- upstream env +server_name test-upstream.lvh.me; +listen $TEST_NGINX_RANDOM_PORT ssl; +ssl_certificate $TEST_NGINX_SERVER_ROOT/html/server.crt; +ssl_certificate_key $TEST_NGINX_SERVER_ROOT/html/server.key; +location /test { + access_by_lua_block { + assert = require('luassert') + local content_length = ngx.req.get_headers()["Content-Length"] + local encoding = ngx.req.get_headers()["Transfer-Encoding"] + assert.equal('chunked', encoding) + assert.falsy(content_length) + } + echo_read_request_body; + 
echo_request_body; +} +--- more_headers +Transfer-Encoding: chunked +--- request eval +"POST /test?user_key=value +". +sprintf("%x\r\n", length $ENV{"LARGE_BODY"}). +$ENV{LARGE_BODY} +."\r +0\r +\r +" +--- response_body eval +$ENV{LARGE_BODY} +--- error_code: 200 +--- error_log env +using proxy: $TEST_NGINX_HTTPS_PROXY +proxy request: CONNECT test-upstream.lvh.me:$TEST_NGINX_RANDOM_PORT HTTP/1.1 +--- no_error_log +[error] +--- grep_error_log eval +qr/a client request body is buffered to a temporary file/ +--- grep_error_log_out +a client request body is buffered to a temporary file +--- user_files fixture=tls.pl eval From e3f6db59834522bacdc47dcefa5c0b4da952fd48 Mon Sep 17 00:00:00 2001 From: An Tran Date: Fri, 29 Dec 2023 18:42:21 +1000 Subject: [PATCH 3/8] Fixed response body not being sent --- gateway/src/apicast/http_proxy.lua | 31 ++++++++++++---- .../policy/request_unbuffered/README.md | 5 +++ gateway/src/resty/http/response_writer.lua | 37 ++++++++++++++++++- 3 files changed, 65 insertions(+), 8 deletions(-) diff --git a/gateway/src/apicast/http_proxy.lua b/gateway/src/apicast/http_proxy.lua index abbd9c634..3c252c993 100644 --- a/gateway/src/apicast/http_proxy.lua +++ b/gateway/src/apicast/http_proxy.lua @@ -126,20 +126,30 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) local req_method = ngx_get_method() local encoding = ngx.req.get_headers()["Transfer-Encoding"] local is_chunked = encoding and encoding:lower() == "chunked" + local content_type = ngx.req.get_headers()["Content-Type"] + local content_type_is_urlencoded = content_type and content_type:lower() == "application/x-www-form-urlencoded" + local raw = false if http_methods_with_body[req_method] then - if opts.request_unbuffered and ngx_http_version() == 1.1 then + + -- When the content type is "application/x-www-form-urlencoded" the body is always pre-read. 
+ -- See: gateway/src/apicast/configuration/service.lua:214 + -- + -- Due to this, ngx.req.socket() will fail with "request body already exists" error or return + -- socket but hang on read in case of raw socket. Therefore, we only retrieve body from the + -- socket if the content type is not "application/x-www-form-urlencoded" + if opts.request_unbuffered and ngx_http_version() == 1.1 and not content_type_is_urlencoded then local _, err = handle_expect() if err then ngx.log(ngx.ERR, "failed to handle expect header, err: ", err) return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) end - if is_chunked then -- The default ngx reader does not support chunked request -- so we will need to get the raw request socket and manually -- decode the chunked request sock, err = ngx.req.socket(true) + raw = true else sock, err = ngx.req.socket() end @@ -151,6 +161,11 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) body = client_body_reader(sock, DEFAULT_CHUNKSIZE, is_chunked) else + -- TODO: Due to ngx.req.read_body(). The current implementation will not work with grpc service + -- See: https://github.com/3scale/APIcast/pull/1419 + -- Should we get the body from socket by default and only read buffered body if + -- "Content-Type: application/x-www-form-urlencoded"? + -- -- This is needed to call ngx.req.get_body_data() below. ngx.req.read_body() @@ -178,9 +193,9 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) if is_chunked then -- If the body is smaller than "client_boby_buffer_size" the Content-Length header is - -- set based on the size of the buffer. However, when the body is rendered to a file, - -- we will need to calculate and manually set the Content-Length header based on the - -- file size + -- set by openresty based on the size of the buffer. 
However, when the body is rendered + -- to a file, we will need to calculate and manually set the Content-Length header based + -- on the file size local contentLength, err = file_size(temp_file_path) if err then ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err) @@ -192,7 +207,9 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) end end - -- The whole request is buffered in the memory so remove the Transfer-Encoding: chunked + -- The whole request is buffered with chunked encoding removed, so remove the Transfer-Encoding: chunked + -- header, otherwise the upstream won't be able to read the body as it expected chunk encoded + -- body if is_chunked then ngx.req.set_header("Transfer-Encoding", nil) end @@ -221,7 +238,7 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) res, err = httpc:request(request) if res then - if opts.request_unbuffered and is_chunked then + if opts.request_unbuffered and raw then local bytes, err = send_response(sock, res, DEFAULT_CHUNKSIZE) if not bytes then ngx.log(ngx.ERR, "failed to send response: ", err) diff --git a/gateway/src/apicast/policy/request_unbuffered/README.md b/gateway/src/apicast/policy/request_unbuffered/README.md index ebc7b3508..f2e570c7e 100644 --- a/gateway/src/apicast/policy/request_unbuffered/README.md +++ b/gateway/src/apicast/policy/request_unbuffered/README.md @@ -12,3 +12,8 @@ This policy allows to disable request buffering } ``` +## Caveats + +- Because APIcast allows defining mapping rules based on request content, POST requests with + `Content-type: application/x-www-form-urlencoded` will always be buffered regardless of the + `request_unbuffered` policy is enabled or not. 
diff --git a/gateway/src/resty/http/response_writer.lua b/gateway/src/resty/http/response_writer.lua index 9ab849676..fc320512d 100644 --- a/gateway/src/resty/http/response_writer.lua +++ b/gateway/src/resty/http/response_writer.lua @@ -1,8 +1,25 @@ +local fmt = string.format +local str_lower = string.lower + local _M = { } local cr_lf = "\r\n" +-- http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html#sec13.5.1 +local HOP_BY_HOP_HEADERS = { + ["connection"] = true, + ["keep-alive"] = true, + ["proxy-authenticate"] = true, + ["proxy-authorization"] = true, + ["te"] = true, + ["trailers"] = true, + ["transfer-encoding"] = true, + ["upgrade"] = true, + ["content-length"] = true, -- Not strictly hop-by-hop, but Nginx will deal + -- with this (may send chunked for example). +} + local function send(socket, data) if not data or data == '' then ngx.log(ngx.DEBUG, 'skipping sending nil') @@ -28,8 +45,26 @@ function _M.send_response(sock, response, chunksize) end -- Status line - local status = "HTTP/1.1 " .. response.status .. " " .. response.reason .. cr_lf + -- TODO: get HTTP version from request + local status = fmt("HTTP/%d.%d %03d %s\r\n", 1, 1, response.status, response.reason) bytes, err = send(sock, status) + if not bytes then + return nil, "failed to send status line, err: " .. (err or "unknown") + end + + -- Filter out hop-by-hop headeres + for k, v in pairs(response.headers) do + if not HOP_BY_HOP_HEADERS[str_lower(k)] then + local header = fmt("%s: %s\r\n", k, v) + bytes, err = sock:send(header) + if not bytes then + return nil, "failed to send status line, err: " .. (err or "unknown") + end + end + end + + -- End-of-header + bytes, err = send(sock, cr_lf) if not bytes then return nil, "failed to send status line, err: " .. 
(err or "unknown") end From f5af61801cb9c9522cc1bf1d9040a608a7fa4459 Mon Sep 17 00:00:00 2001 From: An Tran Date: Mon, 8 Jan 2024 12:00:39 +1000 Subject: [PATCH 4/8] Handle "Expect:100-continue" header only when raw socket is used With regular socket, openresty will process the Expect header on socket read, so we only need to send back "100 Continue" when using raw socket. --- gateway/src/apicast/http_proxy.lua | 26 ----------------------- gateway/src/resty/http/request_reader.lua | 26 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/gateway/src/apicast/http_proxy.lua b/gateway/src/apicast/http_proxy.lua index 3c252c993..55a324a36 100644 --- a/gateway/src/apicast/http_proxy.lua +++ b/gateway/src/apicast/http_proxy.lua @@ -98,27 +98,6 @@ local function absolute_url(uri) ) end -local function handle_expect() - local expect = ngx.req.get_headers()["Expect"] - if type(expect) == "table" then - expect = expect[1] - end - - if expect and expect:lower() == "100-continue" then - ngx.status = 100 - local ok, err = ngx_send_headers() - - if not ok then - return nil, "failed to send response header: " .. (err or "unknown") - end - - ok, err = ngx_flush(true) - if not ok then - return nil, "failed to flush response header: " .. (err or "unknown") - end - end -end - local function forward_https_request(proxy_uri, uri, proxy_opts) local body, err local sock @@ -139,11 +118,6 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) -- socket but hang on read in case of raw socket. 
Therefore, we only retrieve body from the -- socket if the content type is not "application/x-www-form-urlencoded" if opts.request_unbuffered and ngx_http_version() == 1.1 and not content_type_is_urlencoded then - local _, err = handle_expect() - if err then - ngx.log(ngx.ERR, "failed to handle expect header, err: ", err) - return ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) - end if is_chunked then -- The default ngx reader does not support chunked request -- so we will need to get the raw request socket and manually diff --git a/gateway/src/resty/http/request_reader.lua b/gateway/src/resty/http/request_reader.lua index 6b79e82d2..07188a705 100644 --- a/gateway/src/resty/http/request_reader.lua +++ b/gateway/src/resty/http/request_reader.lua @@ -1,10 +1,28 @@ local httpc = require "resty.resolver.http" +local ngx_req = ngx.req local _M = { } local cr_lf = "\r\n" +local function test_expect(sock) + local expect = ngx_req.get_headers()["Expect"] + + if expect == "" or ngx_req.http_version == 1.0 then + return true + end + + if expect and expect:lower() == "100-continue" then + local _, err = sock:send("HTTP/1.1 100 Continue\r\n\r\n") + if err then + ngx.log(ngx.ERR, "failed to handle expect header, err: ", err) + return false, err + end + end + return true +end + -- chunked_reader return a body reader that translates the data read from -- lua-resty-http client_body_reader to HTTP "chunked" format before returning it -- @@ -17,6 +35,14 @@ local function chunked_reader(sock, chunksize) return nil end + -- If Expect: 100-continue is sent upstream, lua-resty-http will only call + -- _send_body after receiving "100 Continue". So it's safe to process the + -- Expect header and send "100 Continue" downstream here. 
+ local ok, err = test_expect(sock) + if not ok then + return nil, err + end + return function() if eof then return nil From 5e05612a82ca23f6e892abebcc5c0552e409c2a2 Mon Sep 17 00:00:00 2001 From: An Tran Date: Mon, 8 Jan 2024 17:59:50 +1000 Subject: [PATCH 5/8] Update CHANGELOG.md file --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index bea15a552..ed1900978 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - Fixed CVE-2023-44487 (HTTP/2 Rapid Reset) [PR #1417](https://github.com/3scale/apicast/pull/1417) [THREESCALE-10224](https://issues.redhat.com/browse/THREESCALE-10224) +- Fixed issue where the proxy policy could not handle requests with "Transfer-Encoding: chunked" header [PR #1403](https://github.com/3scale/APIcast/pull/1403) [THREESCALE-9542](https://issues.redhat.com/browse/THREESCALE-9542) + ### Added - Detect number of CPU shares when running on Cgroups V2 [PR #1410](https://github.com/3scale/apicast/pull/1410) [THREESCALE-10167](https://issues.redhat.com/browse/THREESCALE-10167) From 2bfc227a2ac56f94db5086458079f069d0883282 Mon Sep 17 00:00:00 2001 From: An Tran Date: Fri, 19 Jan 2024 18:10:42 +1000 Subject: [PATCH 6/8] Wrapping block I/O call in a coroutine --- gateway/src/apicast/http_proxy.lua | 2 +- gateway/src/resty/file.lua | 21 ++++++++++++--------- 2 files changed, 13 insertions(+), 10 deletions(-) diff --git a/gateway/src/apicast/http_proxy.lua b/gateway/src/apicast/http_proxy.lua index 55a324a36..a14135f27 100644 --- a/gateway/src/apicast/http_proxy.lua +++ b/gateway/src/apicast/http_proxy.lua @@ -170,7 +170,7 @@ local function forward_https_request(proxy_uri, uri, proxy_opts) -- set by openresty based on the size of the buffer. 
However, when the body is rendered -- to a file, we will need to calculate and manually set the Content-Length header based -- on the file size - local contentLength, err = file_size(temp_file_path) + local contentLength, err = file_size(temp_file_path)() if err then ngx.log(ngx.ERR, "HTTPS proxy: Failed to set content length, err: ", err) ngx.exit(ngx.HTTP_INTERNAL_SERVER_ERROR) diff --git a/gateway/src/resty/file.lua b/gateway/src/resty/file.lua index 592c61059..4ea5ff5c2 100644 --- a/gateway/src/resty/file.lua +++ b/gateway/src/resty/file.lua @@ -1,4 +1,5 @@ local co_yield = coroutine._yield +local co_wrap = coroutine._wrap local open = io.open local co_wrap_iter = require("resty.coroutines").co_wrap_iter @@ -29,19 +30,21 @@ function _M.file_reader(filename) end function _M.file_size(filename) - local handle, err = open(filename) + return co_wrap(function() + local handle, err = open(filename) - if err then - return nil, err - end + if err then + return nil, err + end - local current = handle:seek() - local size = handle:seek("end") + local current = handle:seek() + local size = handle:seek("end") - handle:seek("set", current) - handle:close() + handle:seek("set", current) + handle:close() - return size + return size + end) end return _M From f19b9e555288c84001b16632526487df9fdb5cc3 Mon Sep 17 00:00:00 2001 From: An Tran Date: Fri, 19 Jan 2024 18:19:26 +1000 Subject: [PATCH 7/8] Update request_unbuffered policy README.md file --- .../policy/request_unbuffered/README.md | 96 +++++++++++++++++-- 1 file changed, 88 insertions(+), 8 deletions(-) diff --git a/gateway/src/apicast/policy/request_unbuffered/README.md b/gateway/src/apicast/policy/request_unbuffered/README.md index f2e570c7e..5b787df6d 100644 --- a/gateway/src/apicast/policy/request_unbuffered/README.md +++ b/gateway/src/apicast/policy/request_unbuffered/README.md @@ -1,19 +1,99 @@ # APICast Request Unbuffered -This policy allows to disable request buffering +## Description + +When enable this policy will 
dynamically sets the [`proxy_request_buffering: off`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering
) directive per service.
+
+
+## Technical details
+
+By default, NGINX reads the entire request body into memory (or buffers large requests into disk) before proxying it to the upstream server. However, reading bodies can become expensive, especially when requests with large payloads are sent.
+
+For example, when the client sends 10GB, NGINX will buffer the entire 10GB to disk before sending anything to
+the upstream server.
+
+When `proxy_request_buffering` is in the chain, request buffering will be disabled and the request body will be sent to the proxied server immediately as it received. This can help minimize time spent sending data to a service and disk I/O for requests with big body. However, there are caveats and corner cases applied, [**Caveats**](#caveats)
+
+The policy also provides a consistent behavior across multiple scenarios like:
+
+```
+- APIcast <> upstream HTTP 1.1 plain
+- APIcast <> upstream TLS
+- APIcast <> HTTP Proxy (env var) <> upstream HTTP 1.1 plain
+- APIcast <> HTTP Proxy (policy) <> upstream HTTP 1.1 plain
+- APIcast <> HTTP Proxy (camel proxy) <> upstream HTTP 1.1 plain
+- APIcast <> HTTP Proxy (env var) <> upstream TLS
+- APIcast <> HTTP Proxy (policy) <> upstream TLS
+- APIcast <> HTTP Proxy (camel proxy) <> upstream TLS
+```
+
+## Why don't we also support disable response buffering?
+
+The response buffering is enabled by default in NGINX (the [`proxy_buffering: on`]() directive). It does
+this to shield the backend against slow clients ([slowloris attack](https://en.wikipedia.org/wiki/Slowloris_(computer_security))).
+
+If the `proxy_buffering` is disabled, the upstream server will be forced to keep the connection open until all data has been received by the client. 
Therefore, NGINX [advises](https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#proxy_buffering-off) against disabling `proxy_buffering` as it will potentially waste upstream server resources. ## Example configuration

```
-{
-  "name": "request_unbuffered",
-  "version": "builtin",
-  "configuration": {}
-}
+"policy_chain": [
+  {
+    "name": "request_unbuffered",
+    "version": "builtin"
+  },
+  {
+    "name": "apicast.policy.apicast"
+  }
+]
+```
+
+Use with Proxy policy
+
+```
+"policy_chain": [
+  {
+    "name": "request_unbuffered",
+    "version": "builtin"
+  },
+  {
+    "name": "apicast.policy.http_proxy",
+    "configuration": {
+      "all_proxy": "http://foo:bar@192.168.15.103:8888/",
+      "https_proxy": "http://192.168.15.103:8888/",
+      "http_proxy": "http://192.168.15.103:8888/"
+    }
+  }
+]
+```
+
+Use with Camel Proxy policy
+
+```
+"policy_chain": [
+  {
+    "name": "request_unbuffered",
+    "version": "builtin"
+  },
+  {
+    "name": "apicast.policy.camel",
+    "configuration": {
+      "http_proxy": "http://192.168.15.103:8080/",
+      "https_proxy": "http://192.168.15.103:8443/",
+      "all_proxy": "http://192.168.15.103:8080/"
+    }
+  }
+]
 ```
 
 ## Caveats
 
-- Because APIcast allows defining mapping rules based on request content, POST requests with
-  `Content-type: application/x-www-form-urlencoded` will always be buffered regardless of the
+- Because APIcast allows defining mapping rules based on request content, ie `POST /some_path?a_param={a_value}`
+  will match a request like `POST "http://apicast_host:8080/some_path"` with a form URL-encoded body like: `a_param=abc`
+  , requests with `Content-type: application/x-www-form-urlencoded` will always be buffered regardless of the
   `request_unbuffered` policy is enabled or not.
+- For a request with "small" body that fits into [`client_body_buffer_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size) and with header "Transfer-Encoding: chunked", NGINX will always read and know the length of the body. 
+  Then it will send the request to upstream with the "Content-Length" header.
+- If a client uses chunked transfer encoding with HTTP/1.0, NGINX will always buffer the request body
+- Disable request buffering could potentially expose the backend to [slowloris attack](https://en.wikipedia.org/wiki/Slowloris_(computer_security)). Therefore, we recommend to only use this policy when needed.

From b494ca8d554c7341856635fd50ff5742bccc5f96 Mon Sep 17 00:00:00 2001
From: An Tran
Date: Mon, 22 Jan 2024 17:10:30 +1000
Subject: [PATCH 8/8] Address PR comments

---
 .../policy/request_unbuffered/README.md       | 26 +++++++++----------
 1 file changed, 12 insertions(+), 14 deletions(-)

diff --git a/gateway/src/apicast/policy/request_unbuffered/README.md b/gateway/src/apicast/policy/request_unbuffered/README.md
index 5b787df6d..e1d27051d 100644
--- a/gateway/src/apicast/policy/request_unbuffered/README.md
+++ b/gateway/src/apicast/policy/request_unbuffered/README.md
@@ -5,15 +5,13 @@ When enable this policy will dynamically sets the [`proxy_request_buffering: off`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_request_buffering
 ) directive per service.
-
 ## Technical details
 
-By default, NGINX reads the entire request body into memory (or buffers large requests into disk) before proxying it to the upstream server. However, reading bodies can become expensive, especially when requests with large payloads are sent.
+By default, NGINX reads the entire request body into memory or buffers large requests to disk before forwarding them to the upstream server. Reading bodies can become expensive, especially when sending requests containing large payloads.
 
-For example, when the client sends 10GB, NGINX will buffer the entire 10GB to disk before sending anything to
-the upstream server.
+For example, when the client sends 10GB, NGINX will buffer the entire 10GB to disk before sending anything to the upstream server. 
-When `proxy_request_buffering` is in the chain, request buffering will be disabled and the request body will be sent to the proxied server immediately as it received. This can help minimize time spent sending data to a service and disk I/O for requests with big body. However, there are caveats and corner cases applied, [**Caveats**](#caveats)
+When the `request_unbuffered` policy is in the chain, request buffering is disabled, sending the request body to the proxied server immediately upon receiving it. This can help minimize time spent sending data to a service and disk I/O for requests with big body. However, there are caveats and corner cases applied, [**Caveats**](#caveats)
 
 The policy also provides a consistent behavior across multiple scenarios like:
 
@@ -30,10 +28,15 @@ The policy also provides a consistent behavior across multiple scenarios like:
 
 ## Why don't we also support disable response buffering?
 
-The response buffering is enabled by default in NGINX (the [`proxy_buffering: on`]() directive). It does
-this to shield the backend against slow clients ([slowloris attack](https://en.wikipedia.org/wiki/Slowloris_(computer_security))).
+The response buffering is enabled by default in NGINX (the [`proxy_buffering: on`]() directive). It does this to shield the backend against slow clients ([slowloris attack](https://en.wikipedia.org/wiki/Slowloris_(computer_security))).
+
+If the `proxy_buffering` is disabled, the upstream server keeps the connection open until all data is received by the client. NGINX [advises](https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#proxy_buffering-off) against disabling `proxy_buffering` as it will potentially waste upstream server resources. 
+
+## Why does upstream receive a "Content-Length" header when the original request is sent with "Transfer-Encoding: chunked"
+
+For a request with "small" body that fits into [`client_body_buffer_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size) and with header "Transfer-Encoding: chunked", NGINX will always read and know the length of the body. Then it will send the request to upstream with the "Content-Length" header.
 
-If the `proxy_buffering` is disabled, the upstream server will be forced to keep the connection open until all data has been received by the client. Therefore, NGINX [advises](https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#proxy_buffering-off) against disabling `proxy_buffering` as it will potentially waste upstream server resources.
+If a client uses chunked transfer encoding with HTTP/1.0, NGINX will always buffer the request body
 
 ## Example configuration
 
@@ -89,11 +92,6 @@ Use with Camel Proxy policy
 
 ## Caveats
 
-- Because APIcast allows defining mapping rules based on request content, ie `POST /some_path?a_param={a_value}`
-  will match a request like `POST "http://apicast_host:8080/some_path"` with a form URL-encoded body like: `a_param=abc`
-  , requests with `Content-type: application/x-www-form-urlencoded` will always be buffered regardless of the
+- APIcast allows defining of mapping rules based on request content. For example, `POST /some_path?a_param={a_value}` will match a request like `POST "http://apicast_host:8080/some_path"` with a form URL-encoded body like: `a_param=abc`, requests with `Content-type: application/x-www-form-urlencoded` will always be buffered regardless of the
-- For a request with "small" body that fits into [`client_body_buffer_size`](https://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size) and with header "Transfer-Encoding: chunked", NGINX will always read and know the length of the body. - Then it will send the request to upstream with the "Content-Length" header. -- If a client uses chunked transfer encoding with HTTP/1.0, NGINX will always buffer the request body - Disable request buffering could potentially expose the backend to [slowloris attack](https://en.wikipedia.org/wiki/Slowloris_(computer_security)). Therefore, we recommend to only use this policy when needed.