mirror of https://github.com/jumpserver/jumpserver
fix: Fixed CSV file export so values with leading `0` chars do not disappear
parent 7408ed0f03
commit 38175d6b57
@@ -0,0 +1 @@
+CSV_FILE_ESCAPE_CHARS = ['=', '@', '0']
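Note: `=` and `@` were already treated as dangerous because spreadsheet applications interpret cells starting with them as formulas (a CSV-injection vector); this commit adds `0` because Excel also strips leading zeros from numeric-looking cells, so an exported value like `0755` came back as `755`. A minimal sketch of the shared rule, assuming nothing beyond this constant (the `escape_cell` helper is illustrative, not part of the codebase):

    # Illustrative helper, not in the diff: prefix a single quote on export
    # when a cell starts with one of the escape chars.
    CSV_FILE_ESCAPE_CHARS = ['=', '@', '0']

    def escape_cell(value):
        if isinstance(value, str) and value.strip().startswith(tuple(CSV_FILE_ESCAPE_CHARS)):
            return "'{}".format(value)
        return value

    assert escape_cell('0755') == "'0755"          # leading zero survives Excel
    assert escape_cell('=SUM(A1)') == "'=SUM(A1)"  # formula is neutralized
    assert escape_cell('plain') == 'plain'         # other values untouched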
@@ -4,13 +4,25 @@
 import chardet
 import unicodecsv
 
+from common.utils import lazyproperty
 from .base import BaseFileParser
+from ..const import CSV_FILE_ESCAPE_CHARS
 
 
 class CSVFileParser(BaseFileParser):
 
     media_type = 'text/csv'
 
+    @lazyproperty
+    def match_escape_chars(self):
+        chars = []
+        for c in CSV_FILE_ESCAPE_CHARS:
+            dq_char = '"{}'.format(c)
+            sg_char = "'{}".format(c)
+            chars.append(dq_char)
+            chars.append(sg_char)
+        return tuple(chars)
+
     @staticmethod
     def _universal_newlines(stream):
         """
@@ -19,10 +31,19 @@ class CSVFileParser(BaseFileParser):
         for line in stream.splitlines():
             yield line
 
+    def __parse_row(self, row):
+        row_escape = []
+        for d in row:
+            if isinstance(d, str) and d.strip().startswith(self.match_escape_chars):
+                d = d.lstrip("'").lstrip('"')
+            row_escape.append(d)
+        return row_escape
+
     def generate_rows(self, stream_data):
         detect_result = chardet.detect(stream_data)
         encoding = detect_result.get("encoding", "utf-8")
         lines = self._universal_newlines(stream_data)
         csv_reader = unicodecsv.reader(lines, encoding=encoding)
         for row in csv_reader:
+            row = self.__parse_row(row)
             yield row
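Taken together, the parser changes undo the export-side escaping on import: `match_escape_chars` expands `CSV_FILE_ESCAPE_CHARS` into every quote-prefixed variant, and `__parse_row` strips the leading quote from any cell that starts with one. A hedged sketch of the round trip (values chosen for illustration):

    # match_escape_chars evaluates to the quote-prefixed variants:
    match_escape_chars = ('"=', "'=", '"@', "'@", '"0', "'0")

    # __parse_row then restores the original values on import:
    row = ["'0755", "'=SUM(A1)", 'plain']
    parsed = [
        d.lstrip("'").lstrip('"')
        if isinstance(d, str) and d.strip().startswith(match_escape_chars)
        else d
        for d in row
    ]
    assert parsed == ['0755', '=SUM(A1)', 'plain']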
@@ -6,15 +6,18 @@ import unicodecsv
 from six import BytesIO
 
 from .base import BaseFileRenderer
+from ..const import CSV_FILE_ESCAPE_CHARS
 
 class CSVFileRenderer(BaseFileRenderer):
 
     media_type = 'text/csv'
     format = 'csv'
 
     writer = None
     buffer = None
 
+    escape_chars = tuple(CSV_FILE_ESCAPE_CHARS)
+
     def initial_writer(self):
         csv_buffer = BytesIO()
         csv_buffer.write(codecs.BOM_UTF8)
@@ -22,15 +25,17 @@ class CSVFileRenderer(BaseFileRenderer):
         self.buffer = csv_buffer
         self.writer = csv_writer
 
-    def write_row(self, row):
+    def __render_row(self, row):
         row_escape = []
         for d in row:
-            if isinstance(d, str) and d.strip().startswith(('=', '@')):
+            if isinstance(d, str) and d.strip().startswith(self.escape_chars):
                 d = "'{}".format(d)
-                row_escape.append(d)
-            else:
-                row_escape.append(d)
-        self.writer.writerow(row_escape)
+            row_escape.append(d)
+        return row_escape
+
+    def write_row(self, row):
+        row = self.__render_row(row)
+        self.writer.writerow(row)
 
     def get_rendered_value(self):
         value = self.buffer.getvalue()
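The renderer refactor splits escaping out of `write_row`: `__render_row` checks each cell against the shared `escape_chars` tuple instead of the hardcoded `('=', '@')`, and appends the cell exactly once whether or not it was escaped (the old version duplicated the `append` across both branches). A usage sketch, assuming the renderer can be driven directly without the surrounding DRF request cycle:

    # Illustrative only; assumes CSVFileRenderer is importable from this module
    # and that initial_writer/write_row need no request context.
    renderer = CSVFileRenderer()
    renderer.initial_writer()
    renderer.write_row(['0123', '=SUM(A1)', 'name'])
    # Expected buffer content (after the UTF-8 BOM): '0123,'=SUM(A1),name
    # Excel keeps the leading zero and does not evaluate the formula.
    value = renderer.get_rendered_value()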