# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# This code is partially taken from django-rest-framework:
# Copyright (c) 2011-2014, Tom Christie

"""
|
|
Parsers are used to parse the content of incoming HTTP requests.
|
|
|
|
They give us a generic way of being able to handle various media types
|
|
on the request, such as form content or json encoded data.
|
|
"""
|
|
from django.conf import settings
|
|
from django.core.files.uploadhandler import StopFutureHandlers
|
|
from django.http import QueryDict
|
|
from django.http.multipartparser import MultiPartParser as DjangoMultiPartParser
|
|
from django.http.multipartparser import MultiPartParserError, parse_header, ChunkIter
|
|
|
|
from django.utils import six
|
|
|
|
from taiga.base.exceptions import ParseError
|
|
from taiga.base.api import renderers
|
|
|
|
import json
|
|
import datetime
|
|
import decimal
|
|
|
|
|
|
class DataAndFiles(object):
|
|
def __init__(self, data, files):
|
|
self.data = data
|
|
self.files = files
|
|
|
|
|
|
class BaseParser(object):
|
|
"""
|
|
All parsers should extend `BaseParser`, specifying a `media_type`
|
|
attribute, and overriding the `.parse()` method.
|
|
"""
|
|
|
|
media_type = None
|
|
|
|
def parse(self, stream, media_type=None, parser_context=None):
|
|
"""
|
|
Given a stream to read from, return the parsed representation.
|
|
Should return parsed data, or a `DataAndFiles` object consisting of the
|
|
parsed data and files.
|
|
"""
|
|
raise NotImplementedError(".parse() must be overridden.")
|
|
|
|
|
|
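

# A minimal sketch of the contract described in `BaseParser`: a hypothetical
# plain-text parser that declares a `media_type` and overrides `.parse()`.
# The class name and behaviour are illustrative assumptions only; it is not
# wired into any Taiga view.
class PlainTextParser(BaseParser):
    """
    Hypothetical example parser for "text/plain" request bodies.
    """

    media_type = "text/plain"

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Returns the request body as a single decoded string.
        """
        parser_context = parser_context or {}
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
        return stream.read().decode(encoding)
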
class JSONParser(BaseParser):
    """
    Parses JSON-serialized data.
    """

    media_type = "application/json"
    renderer_class = renderers.UnicodeJSONRenderer

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as JSON and returns the resulting data.
        """
        parser_context = parser_context or {}
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)

        try:
            data = stream.read().decode(encoding)
            return json.loads(data)
        except ValueError as exc:
            raise ParseError("JSON parse error - %s" % six.text_type(exc))
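

def _example_json_parse():
    # A minimal usage sketch for JSONParser. The io.BytesIO buffer and the
    # payload below are assumptions standing in for the request body stream
    # that the API layer would normally supply.
    import io
    parser = JSONParser()
    stream = io.BytesIO(b'{"subject": "Fix login", "points": 3}')
    # Returns the decoded Python structure: {"subject": "Fix login", "points": 3}
    return parser.parse(stream, parser_context={"encoding": "utf-8"})
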
class FormParser(BaseParser):
    """
    Parser for form data.
    """

    media_type = "application/x-www-form-urlencoded"

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as a URL encoded form,
        and returns the resulting QueryDict.
        """
        parser_context = parser_context or {}
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
        data = QueryDict(stream.read(), encoding=encoding)
        return data
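

def _example_form_parse():
    # A minimal usage sketch for FormParser. The urlencoded buffer below is an
    # assumption standing in for the request body stream; repeated keys are
    # preserved by the resulting QueryDict (data.getlist("tags") == ["bug", "ui"]).
    import io
    parser = FormParser()
    stream = io.BytesIO(b"subject=Fix+login&tags=bug&tags=ui")
    return parser.parse(stream, parser_context={"encoding": "utf-8"})
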
class MultiPartParser(BaseParser):
    """
    Parser for multipart form data, which may include file data.
    """

    media_type = "multipart/form-data"

    def parse(self, stream, media_type=None, parser_context=None):
        """
        Parses the incoming bytestream as a multipart encoded form,
        and returns a DataAndFiles object.

        `.data` will be a `QueryDict` containing all the form parameters.
        `.files` will be a `MultiValueDict` containing all the form files.
        """
        parser_context = parser_context or {}
        request = parser_context["request"]
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
        meta = request.META.copy()
        meta["CONTENT_TYPE"] = media_type
        upload_handlers = request.upload_handlers

        try:
            parser = DjangoMultiPartParser(meta, stream, upload_handlers, encoding)
            data, files = parser.parse()
            return DataAndFiles(data, files)
        except MultiPartParserError as exc:
            raise ParseError("Multipart form parse error - %s" % str(exc))


class FileUploadParser(BaseParser):
    """
    Parser for file upload data.
    """

    media_type = "*/*"

    def parse(self, stream, media_type=None, parser_context=None):
"""
|
|
Treats the incoming bytestream as a raw file upload and returns
|
|
a `DateAndFiles` object.
|
|
|
|
`.data` will be None (we expect request body to be a file content).
|
|
`.files` will be a `QueryDict` containing one "file" element.
|
|
"""
|
|
|
|
        parser_context = parser_context or {}
        request = parser_context["request"]
        encoding = parser_context.get("encoding", settings.DEFAULT_CHARSET)
        meta = request.META
        upload_handlers = request.upload_handlers
        filename = self.get_filename(stream, media_type, parser_context)

        # Note that this code is extracted from Django's handling of
        # file uploads in MultiPartParser.
        content_type = meta.get("HTTP_CONTENT_TYPE",
                                meta.get("CONTENT_TYPE", ""))
        try:
            content_length = int(meta.get("HTTP_CONTENT_LENGTH",
                                          meta.get("CONTENT_LENGTH", 0)))
        except (ValueError, TypeError):
            content_length = None

        # See if the handler will want to take care of the parsing.
        for handler in upload_handlers:
            result = handler.handle_raw_input(None,
                                              meta,
                                              content_length,
                                              None,
                                              encoding)
            if result is not None:
                return DataAndFiles(None, {"file": result[1]})

        # This is the standard case.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        chunk_size = min([2 ** 31 - 4] + possible_sizes)
        chunks = ChunkIter(stream, chunk_size)
        counters = [0] * len(upload_handlers)

        for handler in upload_handlers:
            try:
                handler.new_file(None, filename, content_type,
                                 content_length, encoding)
            except StopFutureHandlers:
                break

        for chunk in chunks:
            for i, handler in enumerate(upload_handlers):
                chunk_length = len(chunk)
                chunk = handler.receive_data_chunk(chunk, counters[i])
                counters[i] += chunk_length
                if chunk is None:
                    break

        for i, handler in enumerate(upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                return DataAndFiles(None, {"file": file_obj})
        raise ParseError("FileUpload parse error - "
                         "none of upload handlers can handle the stream")

    def get_filename(self, stream, media_type, parser_context):
        """
        Detects the uploaded file name. First searches for a "filename" URL
        kwarg, then tries to parse the Content-Disposition header.
        """
        try:
            return parser_context["kwargs"]["filename"]
        except KeyError:
            pass

        try:
            meta = parser_context["request"].META
            disposition = parse_header(meta["HTTP_CONTENT_DISPOSITION"])
            return disposition[1]["filename"]
        except (AttributeError, KeyError):
            pass
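

def _example_upload_filename():
    # A minimal sketch of the filename lookup in FileUploadParser: when the URL
    # exposes a "filename" kwarg, get_filename() returns it without touching the
    # request at all. The parser_context dict below is an assumption standing in
    # for the one the API view would normally build.
    parser = FileUploadParser()
    context = {"kwargs": {"filename": "avatar.png"}}
    return parser.get_filename(stream=None, media_type=None, parser_context=context)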