author     Andrei Belov <defan@nginx.com>    2019-12-26 17:52:09 +0300
committer  Andrei Belov <defan@nginx.com>    2019-12-26 17:52:09 +0300
commit     35ff5ee1e82a03e57d625230173a84c829c13257 (patch)
tree       c3dce5e8d50c8da9739f23b41a636931ad562e25 /test/unit/http.py
parent     0ec222bbb202194327c2e76d48f0b2608b37c162 (diff)
parent     55f8e31ed70910ef07db31d7f3c53b12774180f9 (diff)
Merged with the default branch. (tag: 1.14.0-1)
Diffstat (limited to 'test/unit/http.py')
-rw-r--r--  test/unit/http.py  82
1 file changed, 76 insertions(+), 6 deletions(-)
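For orientation before the diff: the merge teaches the test HTTP client to decode chunked response bodies and to parse JSON bodies on request. Below is a rough usage sketch (a hypothetical test case, not part of this commit) of the new json kwarg and getjson() wrapper, assuming a TestHTTP-based test class whose application answers with an application/json body:

# Hypothetical example; the test class, URL and expected body shape are illustrative only.
class TestJSONExample(TestHTTP):
    def test_json_response(self):
        # getjson() is the wrapper added in the diff: it calls get(json=True, ...),
        # so the body arrives already run through json.loads().
        resp = self.getjson(url='/status')

        self.assertEqual(resp['status'], 200, 'status')
        self.assertIsInstance(resp['body'], dict, 'body parsed as JSON')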
diff --git a/test/unit/http.py b/test/unit/http.py
index c7e3e36d..839e91a2 100644
--- a/test/unit/http.py
+++ b/test/unit/http.py
@@ -1,5 +1,6 @@
 import re
 import time
+import json
 import socket
 import select
 from unit.main import TestUnit
@@ -86,23 +87,24 @@ class TestHTTP(TestUnit):
         sock.sendall(req)
+        encoding = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']
+
         if TestUnit.detailed:
             print('>>>')
             try:
-                print(req.decode('utf-8', 'ignore'))
+                print(req.decode(encoding, 'ignore'))
             except UnicodeEncodeError:
                 print(req)
         resp = ''
         if 'no_recv' not in kwargs:
-            enc = 'utf-8' if 'encoding' not in kwargs else kwargs['encoding']
             read_timeout = (
                 30 if 'read_timeout' not in kwargs else kwargs['read_timeout']
             )
             resp = self.recvall(
                 sock, read_timeout=read_timeout, buff_size=read_buffer_size
-            ).decode(enc)
+            ).decode(encoding)
         if TestUnit.detailed:
             print('<<<')
@@ -114,6 +116,15 @@ class TestHTTP(TestUnit):
         if 'raw_resp' not in kwargs:
             resp = self._resp_to_dict(resp)
+            headers = resp.get('headers')
+            if headers and headers.get('Transfer-Encoding') == 'chunked':
+                resp['body'] = self._parse_chunked_body(resp['body']).decode(
+                    encoding
+                )
+
+            if 'json' in kwargs:
+                resp = self._parse_json(resp)
+
         if 'start' not in kwargs:
             sock.close()
             return resp
@@ -151,7 +162,7 @@ class TestHTTP(TestUnit):
         return data
     def _resp_to_dict(self, resp):
-        m = re.search('(.*?\x0d\x0a?)\x0d\x0a?(.*)', resp, re.M | re.S)
+        m = re.search(r'(.*?\x0d\x0a?)\x0d\x0a?(.*)', resp, re.M | re.S)
         if not m:
             return {}
@@ -162,12 +173,12 @@ class TestHTTP(TestUnit):
         headers_lines = p.findall(headers_text)
         status = re.search(
-            '^HTTP\/\d\.\d\s(\d+)|$', headers_lines.pop(0)
+            r'^HTTP\/\d\.\d\s(\d+)|$', headers_lines.pop(0)
         ).group(1)
         headers = {}
         for line in headers_lines:
-            m = re.search('(.*)\:\s(.*)', line)
+            m = re.search(r'(.*)\:\s(.*)', line)
             if m.group(1) not in headers:
                 headers[m.group(1)] = m.group(2)
@@ -180,6 +191,65 @@ class TestHTTP(TestUnit):
         return {'status': int(status), 'headers': headers, 'body': body}
+    def _parse_chunked_body(self, raw_body):
+        if isinstance(raw_body, str):
+            raw_body = bytes(raw_body.encode())
+
+        crlf = b'\r\n'
+        chunks = raw_body.split(crlf)
+
+        if len(chunks) < 3:
+            self.fail('Invalid chunked body')
+
+        if chunks.pop() != b'':
+            self.fail('No CRLF at the end of the body')
+
+        try:
+            last_size = int(chunks[-2], 16)
+        except:
+            self.fail('Invalid zero size chunk')
+
+        if last_size != 0 or chunks[-1] != b'':
+            self.fail('Incomplete body')
+
+        body = b''
+        while len(chunks) >= 2:
+            try:
+                size = int(chunks.pop(0), 16)
+            except:
+                self.fail('Invalid chunk size %s' % str(size))
+
+            if size == 0:
+                self.assertEqual(len(chunks), 1, 'last zero size')
+                break
+
+            temp_body = crlf.join(chunks)
+
+            body += temp_body[:size]
+
+            temp_body = temp_body[size + len(crlf) :]
+
+            chunks = temp_body.split(crlf)
+
+        return body
+
+    def _parse_json(self, resp):
+        headers = resp['headers']
+
+        self.assertIn('Content-Type', headers, 'Content-Type header set')
+        self.assertEqual(
+            headers['Content-Type'],
+            'application/json',
+            'Content-Type header is application/json',
+        )
+
+        resp['body'] = json.loads(resp['body'])
+
+        return resp
+
+    def getjson(self, **kwargs):
+        return self.get(json=True, **kwargs)
+
     def waitforsocket(self, port):
         ret = False
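For reference (not part of the commit): the decoding added in _parse_chunked_body() splits the raw body on CRLF, reads each hexadecimal chunk-size line, and re-joins the remainder before slicing so that chunk data containing CRLF is preserved. A minimal standalone sketch of that same approach, with illustrative names and a sample body:

# Standalone sketch mirroring the split-on-CRLF idea used above; names are illustrative.
def parse_chunked(raw_body):
    crlf = b'\r\n'
    chunks = raw_body.split(crlf)

    assert chunks.pop() == b'', 'body must end with CRLF'
    assert int(chunks[-2], 16) == 0 and chunks[-1] == b'', 'missing terminating zero-size chunk'

    body = b''
    while len(chunks) >= 2:
        size = int(chunks.pop(0), 16)        # chunk-size line is hexadecimal
        if size == 0:                        # zero-size chunk ends the body
            break

        rest = crlf.join(chunks)             # re-join: chunk data may itself contain CRLF
        body += rest[:size]
        chunks = rest[size + len(crlf):].split(crlf)

    return body

# '4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n' decodes to b'Wikipedia'
assert parse_chunked(b'4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n') == b'Wikipedia'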