Skip to content

Commit

Permalink
feat: validate new document writes against max_http_request_size
Browse files Browse the repository at this point in the history
The validation path is now the following:

If a new doc body is > max_document_size, we throw an error.

If a new attachment is > max_attachment_size, we throw an error.

If the new doc body in combination with new and/or existing
attachments is > max_http_request_size, we throw an error.

This also sets the max_http_request_size to 4 GB (4294967296 bytes),
to restore 1.x and 2.0.x compatibility.

Closes #1200
  • Loading branch information
janl committed Mar 29, 2018
1 parent 790783e commit ed935b7
Show file tree
Hide file tree
Showing 4 changed files with 107 additions and 4 deletions.
2 changes: 1 addition & 1 deletion rel/overlay/etc/default.ini
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,7 @@ enable_xframe_options = false
; x_forwarded_proto = X-Forwarded-Proto
; x_forwarded_ssl = X-Forwarded-Ssl
; Maximum allowed http request size. Applies to both clustered and local port.
max_http_request_size = 67108864 ; 64 MB
max_http_request_size = 4294967296 ; 4 GB

; [httpd_design_handlers]
; _view =
Expand Down
6 changes: 5 additions & 1 deletion src/couch/src/couch_att.erl
Original file line number Diff line number Diff line change
Expand Up @@ -47,7 +47,8 @@

-export([
upgrade/1,
downgrade/1
downgrade/1,
to_tuple/1
]).

-export([
Expand Down Expand Up @@ -708,6 +709,9 @@ upgrade(#att{} = Att) ->
upgrade(Att) ->
Att.

%% Flatten an #att{} record into a plain {att, Name, Len, Type, Encoding}
%% tuple; used by couch_doc when computing the multipart stream length of
%% a document's attachments.
to_tuple(#att{} = Att) ->
    {att, Att#att.name, Att#att.att_len, Att#att.type, Att#att.encoding}.


%% Downgrade is exposed for interactive convenience. In practice, unless done
%% manually, upgrades are always one-way.
Expand Down
15 changes: 15 additions & 0 deletions src/couch/src/couch_doc.erl
Original file line number Diff line number Diff line change
Expand Up @@ -136,12 +136,27 @@ from_json_obj_validate(EJson, DbName) ->
case couch_ejson_size:encoded_size(Doc#doc.body) =< MaxSize of
true ->
validate_attachment_sizes(Doc#doc.atts),
validate_total_document_size(Doc),
Doc;
false ->
throw({request_entity_too_large, Doc#doc.id})
end.


% Sum the encoded JSON body size and all attachment body sizes (as they
% would appear in a multipart request) and make sure the total does not
% exceed max_http_request_size; throws {request_entity_too_large, DocId}
% when it does.
validate_total_document_size(#doc{id=DocId, body=Body, atts=Atts0}) ->
    % Default 4294967296 bytes = 4 GiB.
    Limit = config:get_integer("httpd", "max_http_request_size", 4294967296),
    % Mock boundary: only its length matters for the size computation.
    FakeBoundary = couch_uuids:random(),
    AttTuples = [couch_att:to_tuple(Att) || Att <- Atts0],
    {_, TotalSize} = couch_httpd_multipart:length_multipart_stream(
        FakeBoundary, ?JSON_ENCODE(Body), AttTuples),
    if
        TotalSize =< Limit -> ok;
        true -> throw({request_entity_too_large, DocId})
    end.


validate_attachment_sizes([]) ->
ok;
validate_attachment_sizes(Atts) ->
Expand Down
88 changes: 86 additions & 2 deletions src/couch/test/couch_doc_json_tests.erl
Original file line number Diff line number Diff line change
Expand Up @@ -38,8 +38,11 @@ mock(couch_log) ->
ok;
mock(config) ->
    meck:new(config, [passthrough]),
    %% Each fun clause must match the exact default value the code under
    %% test passes as the third argument to config:get_integer/3 —
    %% otherwise the call fails with function_clause instead of returning
    %% the mocked limit. couch_doc calls
    %% config:get_integer("httpd", "max_http_request_size", 4294967296),
    %% so the previous 67108864 pattern here could never match.
    meck:expect(config, get_integer, fun
        ("couchdb", "max_document_size", 4294967296) -> 1024;
        ("httpd", "max_http_request_size", 4294967296) -> 1024
    end),
    meck:expect(config, get, fun(_, _) -> undefined end),
    meck:expect(config, get, fun(_, _, Default) -> Default end),
    ok.
Expand Down Expand Up @@ -124,6 +127,44 @@ from_json_success_cases() ->
]},
"Attachments are parsed correctly."
},
% see if we count our bytes correctly. This doc should be *exactly* 1024 bytes
{
{[
{<<"_attachments">>, {[
{<<"big.xml">>, {[
{<<"content_type">>, <<"xml/yay">>},
{<<"revpos">>, 1},
{<<"length">>, 319},
{<<"stub">>, true}
]}},
{<<"big.json">>, {[
{<<"content_type">>, <<"json/ftw">>},
{<<"revpos">>, 1},
{<<"length">>, 319},
{<<"stub">>, true}
]}}
]}}
]},
#doc{atts = [
couch_att:new([
{name, <<"big.xml">>},
{data, stub},
{type, <<"xml/yay">>},
{att_len, 319},
{disk_len, 319},
{revpos, 1}
]),
couch_att:new([
{name, <<"big.json">>},
{data, stub},
{type, <<"json/ftw">>},
{att_len, 319},
{disk_len, 319},
{revpos, 1}
])
]},
"Document and attachments == max_http_request_size"
},
{
{[{<<"_deleted">>, true}]},
#doc{deleted = true},
Expand Down Expand Up @@ -281,6 +322,49 @@ from_json_error_cases() ->
end,
{request_entity_too_large, <<"large_doc">>},
"Document too large."
},
% doc json body and each attachment are small enough, but combined are >
% max_http_request_size
{
{[
{<<"_id">>, <<"normal_doc_with_atts">>},
{<<"_attachments">>, {[
{<<"big.xml">>, {[
{<<"content_type">>, <<"xml/yay">>},
{<<"revpos">>, 1},
{<<"length">>, 768},
{<<"stub">>, true}
]}},
{<<"big.json">>, {[
{<<"content_type">>, <<"json/ftw">>},
{<<"revpos">>, 1},
{<<"length">>, 768},
{<<"stub">>, true}
]}}
]}}
]},
{request_entity_too_large, <<"normal_doc_with_atts">>},
"Document too large because of attachments."
},
% see if we count our bytes correctly. This doc should be *exactly* 1025 bytes
{
{[
{<<"_attachments">>, {[
{<<"big.xml">>, {[
{<<"content_type">>, <<"xml/yay">>},
{<<"revpos">>, 1},
{<<"length">>, 320},
{<<"stub">>, true}
]}},
{<<"big.json">>, {[
{<<"content_type">>, <<"json/ftw">>},
{<<"revpos">>, 1},
{<<"length">>, 319},
{<<"stub">>, true}
]}}
]}}
]},
"Document and attachments == max_http_request_size + 1"
}
],

Expand Down

0 comments on commit ed935b7

Please sign in to comment.