Code cleanup in Http*

Branch: pull/128/merge
Author: Nils Maier
Date: 2013-09-15 21:52:02 +02:00
Parent: 29d569eef9
Commit: e2700f50a5
8 changed files with 427 additions and 397 deletions

src/HttpConnection.cc

@ -144,6 +144,7 @@ std::unique_ptr<HttpResponse> HttpConnection::receiveResponse()
throw DL_RETRY_EX(EX_GOT_EOF);
}
}
const auto& proc = outstandingHttpRequests_.front()->getHttpHeaderProcessor();
if(proc->parse(socketRecvBuffer_->getBuffer(),
socketRecvBuffer_->getBufferLength())) {
@ -158,10 +159,10 @@ std::unique_ptr<HttpResponse> HttpConnection::receiveResponse()
socketRecvBuffer_->shiftBuffer(proc->getLastBytesProcessed());
outstandingHttpRequests_.pop_front();
return httpResponse;
} else {
socketRecvBuffer_->shiftBuffer(proc->getLastBytesProcessed());
return nullptr;
}
socketRecvBuffer_->shiftBuffer(proc->getLastBytesProcessed());
return nullptr;
}
bool HttpConnection::isIssued(const std::shared_ptr<Segment>& segment) const
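The receiveResponse() hunks above lean on the HttpHeaderProcessor contract: parse() returns true once the full header block has arrived, and getLastBytesProcessed() tells the caller how much of the receive buffer to shiftBuffer() away, leaving any body bytes in place. A standalone toy sketch of that contract (not the commit's code; the class and behaviour here are illustrative assumptions):

    // Toy stand-in for the HttpHeaderProcessor contract: parse() returns true
    // once the terminating CRLFCRLF has been seen, and getLastBytesProcessed()
    // reports how many bytes of the chunk were consumed, so the caller can
    // shift them out of its receive buffer. Not aria2 internals.
    #include <cstddef>
    #include <iostream>
    #include <string>

    class ToyHeaderProcessor {
    public:
      bool parse(const unsigned char* data, size_t len) {
        lastBytesProcessed_ = 0;
        for (size_t i = 0; i < len; ++i) {
          headers_.push_back(static_cast<char>(data[i]));
          ++lastBytesProcessed_;
          if (headers_.size() >= 4 &&
              headers_.compare(headers_.size() - 4, 4, "\r\n\r\n") == 0) {
            return true; // header block complete; body bytes stay behind
          }
        }
        return false; // need more data from the socket
      }
      size_t getLastBytesProcessed() const { return lastBytesProcessed_; }

    private:
      std::string headers_;
      size_t lastBytesProcessed_ = 0;
    };

    int main() {
      ToyHeaderProcessor proc;
      const std::string chunk = "HTTP/1.1 200 OK\r\nContent-Length: 5\r\n\r\nhello";
      if (proc.parse(reinterpret_cast<const unsigned char*>(chunk.data()),
                     chunk.size())) {
        // Only the 38 header bytes were consumed; "hello" is still unread,
        // mirroring socketRecvBuffer_->shiftBuffer(proc->getLastBytesProcessed()).
        std::cout << "consumed " << proc.getLastBytesProcessed() << " of "
                  << chunk.size() << " bytes\n";
      }
    }

The second hunk's change is purely structural: the else branch disappears and the shift/return pair falls through, which is the guard-clause style the rest of the commit applies.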

src/HttpConnection.h

@ -58,11 +58,11 @@ class HttpRequestEntry {
private:
std::unique_ptr<HttpRequest> httpRequest_;
std::unique_ptr<HttpHeaderProcessor> proc_;
public:
HttpRequestEntry(std::unique_ptr<HttpRequest> httpRequest);
const std::unique_ptr<HttpRequest>& getHttpRequest() const
{
const std::unique_ptr<HttpRequest>& getHttpRequest() const {
return httpRequest_;
}
@ -83,13 +83,12 @@ private:
HttpRequestEntries outstandingHttpRequests_;
std::string eraseConfidentialInfo(const std::string& request);
void sendRequest
(std::unique_ptr<HttpRequest> httpRequest, std::string request);
void sendRequest(std::unique_ptr<HttpRequest> httpRequest,
std::string request);
public:
HttpConnection
(cuid_t cuid,
const std::shared_ptr<SocketCore>& socket,
const std::shared_ptr<SocketRecvBuffer>& socketRecvBuffer);
HttpConnection(cuid_t cuid, const std::shared_ptr<SocketCore>& socket,
const std::shared_ptr<SocketRecvBuffer>& socketRecvBuffer);
~HttpConnection();
/**
@ -125,8 +124,7 @@ public:
void sendPendingData();
const std::shared_ptr<SocketRecvBuffer>& getSocketRecvBuffer() const
{
const std::shared_ptr<SocketRecvBuffer>& getSocketRecvBuffer() const {
return socketRecvBuffer_;
}
};

src/HttpDownloadCommand.cc

@ -56,15 +56,14 @@
namespace aria2 {
HttpDownloadCommand::HttpDownloadCommand
(cuid_t cuid,
const std::shared_ptr<Request>& req,
const std::shared_ptr<FileEntry>& fileEntry,
RequestGroup* requestGroup,
std::unique_ptr<HttpResponse> httpResponse,
const std::shared_ptr<HttpConnection>& httpConnection,
DownloadEngine* e,
const std::shared_ptr<SocketCore>& socket)
HttpDownloadCommand::HttpDownloadCommand(cuid_t cuid,
const std::shared_ptr<Request>& req,
const std::shared_ptr<FileEntry>& fileEntry,
RequestGroup* requestGroup,
std::unique_ptr<HttpResponse> httpResponse,
const std::shared_ptr<HttpConnection>& httpConnection,
DownloadEngine* e,
const std::shared_ptr<SocketCore>& socket)
: DownloadCommand(cuid, req, fileEntry, requestGroup, e, socket,
httpConnection->getSocketRecvBuffer()),
httpResponse_(std::move(httpResponse)),
@ -73,71 +72,73 @@ HttpDownloadCommand::HttpDownloadCommand
HttpDownloadCommand::~HttpDownloadCommand() {}
bool HttpDownloadCommand::prepareForNextSegment() {
bool HttpDownloadCommand::prepareForNextSegment()
{
bool downloadFinished = getRequestGroup()->downloadFinished();
if(getRequest()->isPipeliningEnabled() && !downloadFinished) {
if (getRequest()->isPipeliningEnabled() && !downloadFinished) {
auto command = make_unique<HttpRequestCommand>
(getCuid(), getRequest(), getFileEntry(),
getRequestGroup(), httpConnection_,
getDownloadEngine(), getSocket());
// Set proxy request here. aria2 sends the HTTP request specialized for
// proxy.
if(resolveProxyMethod(getRequest()->getProtocol()) == V_GET) {
if (resolveProxyMethod(getRequest()->getProtocol()) == V_GET) {
command->setProxyRequest(createProxyRequest());
}
getDownloadEngine()->addCommand(std::move(command));
return true;
} else {
const std::string& streamFilterName = getStreamFilter()->getName();
if(getRequest()->isPipeliningEnabled() ||
(getRequest()->isKeepAliveEnabled() &&
(
// Make sure that all filters are finished to pool socket
(!util::endsWith(streamFilterName, SinkStreamFilter::NAME) &&
getStreamFilter()->finished()) ||
getRequestEndOffset() ==
getFileEntry()->gtoloff(getSegments().front()->getPositionToWrite())
)
)
) {
// TODO What if server sends EOF when non-SinkStreamFilter is
// used and server didn't send Connection: close? We end up to
// pool terminated socket. In HTTP/1.1, keep-alive is default,
// so closing connection without Connection: close header means
// that server is broken or not configured properly.
getDownloadEngine()->poolSocket
(getRequest(), createProxyRequest(), getSocket());
}
// The request was sent assuming that server supported pipelining, but
// it turned out that server didn't support it.
// We detect this situation by comparing the end byte in range header
// of the response with the end byte of segment.
// If it is the same, HTTP negotiation is necessary for the next request.
if(!getRequest()->isPipeliningEnabled() &&
getRequest()->isPipeliningHint() &&
!downloadFinished) {
const std::shared_ptr<Segment>& segment = getSegments().front();
int64_t lastOffset =getFileEntry()->gtoloff
(std::min(segment->getPosition()+segment->getLength(),
getFileEntry()->getLastOffset()));
Range range = httpResponse_->getHttpHeader()->getRange();
if(lastOffset == range.endByte + 1) {
return prepareForRetry(0);
}
}
return DownloadCommand::prepareForNextSegment();
}
const std::string& streamFilterName = getStreamFilter()->getName();
if (getRequest()->isPipeliningEnabled() ||
(getRequest()->isKeepAliveEnabled() &&
(
// Make sure that all filters are finished to pool socket
(!util::endsWith(streamFilterName, SinkStreamFilter::NAME) &&
getStreamFilter()->finished()) ||
getRequestEndOffset() ==
getFileEntry()->gtoloff(getSegments().front()->getPositionToWrite())
)
)
) {
// TODO What if server sends EOF when non-SinkStreamFilter is
// used and server didn't send Connection: close? We end up to
// pool terminated socket. In HTTP/1.1, keep-alive is default,
// so closing connection without Connection: close header means
// that server is broken or not configured properly.
getDownloadEngine()->poolSocket(getRequest(), createProxyRequest(),
getSocket());
}
// The request was sent assuming that server supported pipelining, but
// it turned out that server didn't support it.
// We detect this situation by comparing the end byte in range header
// of the response with the end byte of segment.
// If it is the same, HTTP negotiation is necessary for the next request.
if (!getRequest()->isPipeliningEnabled() &&
getRequest()->isPipeliningHint() &&
!downloadFinished) {
const std::shared_ptr<Segment>& segment = getSegments().front();
int64_t lastOffset =getFileEntry()->gtoloff(
std::min(segment->getPosition()+segment->getLength(),
getFileEntry()->getLastOffset()));
auto range = httpResponse_->getHttpHeader()->getRange();
if (lastOffset == range.endByte + 1) {
return prepareForRetry(0);
}
}
return DownloadCommand::prepareForNextSegment();
}
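The comment block about pipelining above compresses into a single comparison: if the response's Range ends exactly at the current segment boundary, the server served one request at a time and the next request must be renegotiated. A hedged, self-contained illustration with made-up numbers (the gtoloff() global-to-local mapping is treated as the identity here):

    // Returns true when the response range stops at the segment boundary,
    // i.e. the server honoured only the first pipelined request.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>

    bool needsRenegotiation(int64_t segmentPosition, int64_t segmentLength,
                            int64_t fileLastOffset, int64_t rangeEndByte) {
      const int64_t lastOffset =
          std::min(segmentPosition + segmentLength, fileLastOffset);
      return lastOffset == rangeEndByte + 1;
    }

    int main() {
      // Segment covers bytes [1048576, 2097152); the server answered
      // "Content-Range: bytes 1048576-2097151/...", so endByte == 2097151.
      std::cout << std::boolalpha
                << needsRenegotiation(1048576, 1048576, 10485760, 2097151)
                << '\n'; // true: fall back to per-request negotiation
      // A server that kept serving past the segment would not line up.
      std::cout << needsRenegotiation(1048576, 1048576, 10485760, 4194303)
                << '\n'; // false
    }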
int64_t HttpDownloadCommand::getRequestEndOffset() const
{
int64_t endByte = httpResponse_->getHttpHeader()->getRange().endByte;
if(endByte > 0) {
auto endByte = httpResponse_->getHttpHeader()->getRange().endByte;
if (endByte > 0) {
return endByte+1;
} else {
return endByte;
}
return endByte;
}
} // namespace aria2

src/HttpDownloadCommand.h

@ -46,9 +46,11 @@ class HttpDownloadCommand : public DownloadCommand {
private:
std::unique_ptr<HttpResponse> httpResponse_;
std::shared_ptr<HttpConnection> httpConnection_;
protected:
virtual bool prepareForNextSegment() CXX11_OVERRIDE;
virtual int64_t getRequestEndOffset() const CXX11_OVERRIDE;
public:
HttpDownloadCommand(cuid_t cuid,
const std::shared_ptr<Request>& req,

src/HttpResponse.cc

@ -32,6 +32,7 @@
* files in the program, then also delete it here.
*/
/* copyright --> */
#include "HttpResponse.h"
#include "Request.h"
#include "Segment.h"
@ -74,40 +75,43 @@ HttpResponse::HttpResponse()
void HttpResponse::validateResponse() const
{
int statusCode = getStatusCode();
if(statusCode >= 400) {
if (statusCode >= 400) {
return;
}
if(statusCode == 304) {
if(!httpRequest_->conditionalRequest()) {
if (statusCode == 304) {
if (!httpRequest_->conditionalRequest()) {
throw DL_ABORT_EX2("Got 304 without If-Modified-Since or If-None-Match",
error_code::HTTP_PROTOCOL_ERROR);
}
} else if(statusCode == 301 ||
statusCode == 302 ||
statusCode == 303 ||
statusCode == 307) {
if(!httpHeader_->defined(HttpHeader::LOCATION)) {
}
else if (statusCode == 301 ||
statusCode == 302 ||
statusCode == 303 ||
statusCode == 307) {
if (!httpHeader_->defined(HttpHeader::LOCATION)) {
throw DL_ABORT_EX2(fmt(EX_LOCATION_HEADER_REQUIRED, statusCode),
error_code::HTTP_PROTOCOL_ERROR);
}
return;
} else if(statusCode == 200 || statusCode == 206) {
if(!httpHeader_->defined(HttpHeader::TRANSFER_ENCODING)) {
}
else if (statusCode == 200 || statusCode == 206) {
if (!httpHeader_->defined(HttpHeader::TRANSFER_ENCODING)) {
// compare the received range against the requested range
Range responseRange = httpHeader_->getRange();
if(!httpRequest_->isRangeSatisfied(responseRange)) {
throw DL_ABORT_EX2
(fmt(EX_INVALID_RANGE_HEADER,
httpRequest_->getStartByte(),
httpRequest_->getEndByte(),
httpRequest_->getEntityLength(),
responseRange.startByte,
responseRange.endByte,
responseRange.entityLength),
error_code::CANNOT_RESUME);
auto responseRange = httpHeader_->getRange();
if (!httpRequest_->isRangeSatisfied(responseRange)) {
throw DL_ABORT_EX2(fmt(EX_INVALID_RANGE_HEADER,
httpRequest_->getStartByte(),
httpRequest_->getEndByte(),
httpRequest_->getEntityLength(),
responseRange.startByte,
responseRange.endByte,
responseRange.entityLength),
error_code::CANNOT_RESUME);
}
}
} else {
}
else {
throw DL_ABORT_EX2(fmt("Unexpected status %d", statusCode),
error_code::HTTP_PROTOCOL_ERROR);
}
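For the 200/206 branch above, the received range is checked against what was requested. One plausible reading of that check, using a local stand-in struct rather than aria2's Range/HttpRequest types (isRangeSatisfied()'s exact rules are not shown in this diff, so the predicate below is an assumption):

    #include <cstdint>
    #include <iostream>

    struct RangeSpec {
      int64_t startByte;
      int64_t endByte;
      int64_t entityLength;
    };

    // Assumed rule: the 206 reply must start where we asked, not end before
    // the byte we asked for, and report the entity length we expect.
    bool rangeSatisfied(const RangeSpec& requested, const RangeSpec& got) {
      return got.startByte == requested.startByte &&
             got.endByte >= requested.endByte &&
             got.entityLength == requested.entityLength;
    }

    int main() {
      RangeSpec requested{0, 1023, 4096};
      RangeSpec shifted{512, 1535, 4096}; // server ignored the start offset
      std::cout << std::boolalpha
                << rangeSatisfied(requested, requested) << '\n' // true
                << rangeSatisfied(requested, shifted) << '\n';  // false: abort, cannot resume
    }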
@ -115,18 +119,17 @@ void HttpResponse::validateResponse() const
std::string HttpResponse::determinFilename() const
{
std::string contentDisposition =
util::getContentDispositionFilename
(httpHeader_->find(HttpHeader::CONTENT_DISPOSITION));
if(contentDisposition.empty()) {
std::string file =
util::percentDecode(httpRequest_->getFile().begin(),
httpRequest_->getFile().end());
if(file.empty()) {
std::string contentDisposition = util::getContentDispositionFilename(
httpHeader_->find(HttpHeader::CONTENT_DISPOSITION));
if (contentDisposition.empty()) {
auto file = httpRequest_->getFile();
file = util::percentDecode(file.begin(), file.end());
if (file.empty()) {
return "index.html";
}
return file;
}
A2_LOG_INFO(fmt(MSG_CONTENT_DISPOSITION_DETECTED,
cuid_,
contentDisposition.c_str()));
@ -136,39 +139,36 @@ std::string HttpResponse::determinFilename() const
void HttpResponse::retrieveCookie()
{
Time now;
std::pair<std::multimap<int, std::string>::const_iterator,
std::multimap<int, std::string>::const_iterator> r =
httpHeader_->equalRange(HttpHeader::SET_COOKIE);
for(; r.first != r.second; ++r.first) {
httpRequest_->getCookieStorage()->parseAndStore
((*r.first).second, httpRequest_->getHost(), httpRequest_->getDir(),
now.getTime());
auto r = httpHeader_->equalRange(HttpHeader::SET_COOKIE);
for (; r.first != r.second; ++r.first) {
httpRequest_->getCookieStorage()->parseAndStore(
(*r.first).second,
httpRequest_->getHost(),
httpRequest_->getDir(),
now.getTime());
}
}
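retrieveCookie(), getMetalinKHttpEntries() and getDigest() all iterate repeated header fields the same way: HttpHeader keeps fields in a multimap, and equalRange() hands back the [first, second) iterator pair for one field name. The same shape with a plain std::multimap (a sketch, not aria2's HttpHeader):

    #include <iostream>
    #include <map>
    #include <string>

    int main() {
      std::multimap<std::string, std::string> header;
      header.insert(std::make_pair("set-cookie", "a=1; Path=/"));
      header.insert(std::make_pair("set-cookie", "b=2; Path=/download"));
      header.insert(std::make_pair("content-type", "text/html"));

      // Same pattern as: auto r = httpHeader_->equalRange(HttpHeader::SET_COOKIE);
      std::pair<std::multimap<std::string, std::string>::const_iterator,
                std::multimap<std::string, std::string>::const_iterator> r =
          header.equal_range("set-cookie");
      for (; r.first != r.second; ++r.first) {
        std::cout << r.first->second << '\n'; // each Set-Cookie value in turn
      }
    }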
bool HttpResponse::isRedirect() const
{
int statusCode = getStatusCode();
return (301 == statusCode ||
302 == statusCode ||
303 == statusCode ||
307 == statusCode) &&
httpHeader_->defined(HttpHeader::LOCATION);
auto code = getStatusCode();
return (301 == code || 302 == code || 303 == code || 307 == code) &&
httpHeader_->defined(HttpHeader::LOCATION);
}
void HttpResponse::processRedirect()
{
if(httpRequest_->getRequest()->redirectUri
(util::percentEncodeMini(getRedirectURI()))) {
A2_LOG_INFO(fmt(MSG_REDIRECT,
cuid_,
httpRequest_->getRequest()->getCurrentUri().c_str()));
} else {
throw DL_RETRY_EX
(fmt("CUID#%" PRId64 " - Redirect to %s failed. It may not be a valid URI.",
cuid_,
httpRequest_->getRequest()->getCurrentUri().c_str()));
const auto& req = httpRequest_->getRequest();
if (!req->redirectUri(util::percentEncodeMini(getRedirectURI()))) {
throw DL_RETRY_EX(fmt("CUID#%" PRId64
" - Redirect to %s failed. It may not be a valid URI.",
cuid_,
req->getCurrentUri().c_str()));
}
A2_LOG_INFO(fmt(MSG_REDIRECT,
cuid_,
httpRequest_->getRequest()->getCurrentUri().c_str()));
}
const std::string& HttpResponse::getRedirectURI() const
@ -192,8 +192,8 @@ HttpResponse::getTransferEncodingStreamFilter() const
{
// TODO Transfer-Encoding header field can contains multiple tokens. We should
// parse the field and retrieve each token.
if(isTransferEncodingSpecified()) {
if(util::strieq(getTransferEncoding(), "chunked")) {
if (isTransferEncodingSpecified()) {
if (util::strieq(getTransferEncoding(), "chunked")) {
return make_unique<ChunkedDecodingStreamFilter>();
}
}
@ -219,6 +219,7 @@ HttpResponse::getContentEncodingStreamFilter() const
return make_unique<GZipDecodingStreamFilter>();
}
#endif // HAVE_ZLIB
return nullptr;
}
@ -226,30 +227,30 @@ int64_t HttpResponse::getContentLength() const
{
if(!httpHeader_) {
return 0;
} else {
return httpHeader_->getRange().getContentLength();
}
return httpHeader_->getRange().getContentLength();
}
int64_t HttpResponse::getEntityLength() const
{
if(!httpHeader_) {
return 0;
} else {
return httpHeader_->getRange().entityLength;
}
return httpHeader_->getRange().entityLength;
}
std::string HttpResponse::getContentType() const
{
if(!httpHeader_) {
return A2STR::NIL;
} else {
const std::string& ctype = httpHeader_->find(HttpHeader::CONTENT_TYPE);
std::string::const_iterator i = std::find(ctype.begin(), ctype.end(), ';');
Scip p = util::stripIter(ctype.begin(), i);
return std::string(p.first, p.second);
}
const auto& ctype = httpHeader_->find(HttpHeader::CONTENT_TYPE);
auto i = std::find(ctype.begin(), ctype.end(), ';');
Scip p = util::stripIter(ctype.begin(), i);
return std::string(p.first, p.second);
}
void HttpResponse::setHttpHeader(std::unique_ptr<HttpHeader> httpHeader)
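getContentType() above keeps only the media type and drops any parameters after ';'. A stand-alone equivalent using plain std::string in place of util::stripIter() (whitespace handling here is approximate):

    #include <algorithm>
    #include <iostream>
    #include <string>

    std::string mediaTypeOf(const std::string& contentType) {
      // Cut at the first ';', as the accessor above does.
      std::string::const_iterator semi =
          std::find(contentType.begin(), contentType.end(), ';');
      std::string head(contentType.begin(), semi);
      // Trim trailing blanks, roughly what util::stripIter() takes care of.
      while (!head.empty() &&
             (head[head.size() - 1] == ' ' || head[head.size() - 1] == '\t')) {
        head.erase(head.size() - 1);
      }
      return head;
    }

    int main() {
      std::cout << mediaTypeOf("text/html; charset=UTF-8") << '\n'; // "text/html"
      std::cout << mediaTypeOf("application/metalink4+xml") << '\n';
    }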
@ -283,81 +284,93 @@ bool HttpResponse::supportsPersistentConnection() const
}
namespace {
bool parseMetalinkHttpLink(MetalinkHttpEntry& result, const std::string& s)
{
std::string::const_iterator first = std::find(s.begin(), s.end(), '<');
if(first == s.end()) {
const auto first = std::find(s.begin(), s.end(), '<');
if (first == s.end()) {
return false;
}
std::string::const_iterator last = std::find(first, s.end(), '>');
auto last = std::find(first, s.end(), '>');
if(last == s.end()) {
return false;
}
std::pair<std::string::const_iterator,
std::string::const_iterator> p = util::stripIter(first+1, last);
if(p.first == p.second) {
auto p = util::stripIter(first+1, last);
if (p.first == p.second) {
return false;
}
result.uri.assign(p.first, p.second);
last = std::find(last, s.end(), ';');
if(last != s.end()) {
if (last != s.end()) {
++last;
}
bool ok = false;
while(1) {
while (1) {
std::string name, value;
std::pair<std::string::const_iterator, bool> r =
util::nextParam(name, value, last, s.end(), ';');
auto r = util::nextParam(name, value, last, s.end(), ';');
last = r.first;
if(!r.second) {
if (!r.second) {
break;
}
if(value.empty()) {
if(name == "pref") {
result.pref = true;
}
} else {
if(name == "rel") {
if(value == "duplicate") {
ok = true;
} else {
ok = false;
}
} else if(name == "pri") {
int32_t priValue;
if(util::parseIntNoThrow(priValue, value)) {
if(1 <= priValue && priValue <= 999999) {
result.pri = priValue;
}
}
} else if(name == "geo") {
util::lowercase(value);
result.geo = value;
continue;
}
if(name == "rel") {
if(value == "duplicate") {
ok = true;
} else {
ok = false;
}
continue;
}
if (name == "pri") {
int32_t priValue;
if(util::parseIntNoThrow(priValue, value)) {
if(1 <= priValue && priValue <= 999999) {
result.pri = priValue;
}
}
continue;
}
if (name == "geo") {
util::lowercase(value);
result.geo = value;
continue;
}
}
return ok;
}
} // namespace
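parseMetalinkHttpLink() above pulls a mirror URI out of an RFC 5988 Link header value and then walks its ';'-separated parameters. Below is a deliberately naive stand-alone version for illustration only; the real code uses util::nextParam(), lowercases geo, and accepts pri values only in [1, 999999]:

    #include <cstdlib>
    #include <iostream>
    #include <sstream>
    #include <string>

    struct MirrorEntry {
      std::string uri;
      std::string geo;
      int pri = 999999;
      bool preferred = false;
      bool duplicate = false;
    };

    // Accepts a single Link header value such as
    //   <http://mirror.example.org/file>; rel=duplicate; pri=2; geo=de
    MirrorEntry parseLinkValue(const std::string& s) {
      MirrorEntry e;
      std::string::size_type lt = s.find('<'), gt = s.find('>', lt);
      if (lt == std::string::npos || gt == std::string::npos) return e;
      e.uri = s.substr(lt + 1, gt - lt - 1);
      std::istringstream params(s.substr(gt + 1));
      std::string tok;
      while (std::getline(params, tok, ';')) {
        std::string::size_type eq = tok.find('=');
        std::string name = tok.substr(0, eq == std::string::npos ? tok.size() : eq);
        name.erase(0, name.find_first_not_of(" \t"));          // trim left
        name.erase(name.find_last_not_of(" \t") + 1);          // trim right
        std::string value = eq == std::string::npos ? "" : tok.substr(eq + 1);
        if (name == "rel") e.duplicate = (value == "duplicate");
        else if (name == "pri") e.pri = std::atoi(value.c_str());
        else if (name == "geo") e.geo = value;
        else if (name == "pref") e.preferred = true;
      }
      return e;
    }

    int main() {
      MirrorEntry e = parseLinkValue(
          "<http://mirror.example.org/file>; rel=duplicate; pri=2; geo=de");
      std::cout << e.uri << " duplicate=" << e.duplicate
                << " pri=" << e.pri << " geo=" << e.geo << '\n';
    }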
// Metalink/HTTP is defined by http://tools.ietf.org/html/rfc6249.
// Link header field is defined by http://tools.ietf.org/html/rfc5988.
void HttpResponse::getMetalinKHttpEntries
(std::vector<MetalinkHttpEntry>& result,
const std::shared_ptr<Option>& option) const
void HttpResponse::getMetalinKHttpEntries(
std::vector<MetalinkHttpEntry>& result,
const std::shared_ptr<Option>& option) const
{
std::pair<std::multimap<int, std::string>::const_iterator,
std::multimap<int, std::string>::const_iterator> p =
httpHeader_->equalRange(HttpHeader::LINK);
for(; p.first != p.second; ++p.first) {
auto p = httpHeader_->equalRange(HttpHeader::LINK);
for (; p.first != p.second; ++p.first) {
MetalinkHttpEntry e;
if(parseMetalinkHttpLink(e, (*p.first).second)) {
result.push_back(e);
}
}
if(!result.empty()) {
if (!result.empty()) {
std::vector<std::string> locs;
if(option->defined(PREF_METALINK_LOCATION)) {
if (option->defined(PREF_METALINK_LOCATION)) {
const std::string& loc = option->get(PREF_METALINK_LOCATION);
util::split(loc.begin(), loc.end(), std::back_inserter(locs), ',', true);
for (auto& l: locs) {
@ -370,6 +383,7 @@ void HttpResponse::getMetalinKHttpEntries
}
}
}
std::sort(result.begin(), result.end());
}
@ -378,49 +392,50 @@ void HttpResponse::getMetalinKHttpEntries
// http://tools.ietf.org/html/rfc3230.
void HttpResponse::getDigest(std::vector<Checksum>& result) const
{
using std::swap;
std::pair<std::multimap<int, std::string>::const_iterator,
std::multimap<int, std::string>::const_iterator> p =
httpHeader_->equalRange(HttpHeader::DIGEST);
for(; p.first != p.second; ++p.first) {
auto p = httpHeader_->equalRange(HttpHeader::DIGEST);
for (; p.first != p.second; ++p.first) {
const std::string& s = (*p.first).second;
std::string::const_iterator itr = s.begin();
while(1) {
while (1) {
std::string hashType, digest;
std::pair<std::string::const_iterator, bool> r =
util::nextParam(hashType, digest, itr, s.end(), ',');
auto r = util::nextParam(hashType, digest, itr, s.end(), ',');
itr = r.first;
if(!r.second) {
if (!r.second) {
break;
}
util::lowercase(hashType);
digest = base64::decode(digest.begin(), digest.end());
if(!MessageDigest::supports(hashType) ||
if (!MessageDigest::supports(hashType) ||
MessageDigest::getDigestLength(hashType) != digest.size()) {
continue;
}
result.push_back(Checksum(hashType, digest));
}
}
std::sort(result.begin(), result.end(), HashTypeStronger());
std::vector<Checksum> temp;
for(auto i = result.begin(), eoi = result.end(); i != eoi;) {
for (auto i = result.begin(), eoi = result.end(); i != eoi;) {
bool ok = true;
auto j = i+1;
auto j = i + 1;
for(; j != eoi; ++j) {
if((*i).getHashType() != (*j).getHashType()) {
break;
}
if((*i).getDigest() != (*j).getDigest()) {
if ((*i).getDigest() != (*j).getDigest()) {
ok = false;
}
}
if(ok) {
if (ok) {
temp.push_back(*i);
}
i = j;
}
swap(temp, result);
std::swap(temp, result);
}
#endif // ENABLE_MESSAGE_DIGEST
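The tail of getDigest() implements a simple rule: after sorting, keep one checksum per hash type, but if the same type is announced with conflicting values, drop that type entirely. A small model of just that rule (sorting by hash strength and base64 decoding are left out of this sketch):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    typedef std::pair<std::string, std::string> TypeDigest; // (hashType, digest)

    std::vector<TypeDigest> dedupe(std::vector<TypeDigest> in) {
      std::vector<TypeDigest> out;
      for (size_t i = 0; i < in.size();) {
        size_t j = i + 1;
        bool ok = true;
        // Entries are assumed grouped by hash type (the real code sorts first).
        while (j < in.size() && in[j].first == in[i].first) {
          if (in[j].second != in[i].second) ok = false; // conflicting value
          ++j;
        }
        if (ok) out.push_back(in[i]); // keep one entry per agreeing type
        i = j;
      }
      return out;
    }

    int main() {
      std::vector<TypeDigest> digests;
      digests.push_back(TypeDigest("sha-256", "aaa"));
      digests.push_back(TypeDigest("sha-256", "bbb")); // conflict: sha-256 dropped
      digests.push_back(TypeDigest("md5", "ccc"));
      std::vector<TypeDigest> kept = dedupe(digests);
      for (size_t k = 0; k < kept.size(); ++k)
        std::cout << kept[k].first << " -> " << kept[k].second << '\n'; // md5 -> ccc
    }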

src/HttpResponse.h

@ -58,6 +58,7 @@ private:
cuid_t cuid_;
std::unique_ptr<HttpRequest> httpRequest_;
std::unique_ptr<HttpHeader> httpHeader_;
public:
HttpResponse();
@ -109,13 +110,11 @@ public:
void setHttpRequest(std::unique_ptr<HttpRequest> httpRequest);
const std::unique_ptr<HttpRequest>& getHttpRequest() const
{
const std::unique_ptr<HttpRequest>& getHttpRequest() const {
return httpRequest_;
}
void setCuid(cuid_t cuid)
{
void setCuid(cuid_t cuid) {
cuid_ = cuid;
}
@ -123,9 +122,9 @@ public:
bool supportsPersistentConnection() const;
void getMetalinKHttpEntries
(std::vector<MetalinkHttpEntry>& result,
const std::shared_ptr<Option>& option) const;
void getMetalinKHttpEntries(std::vector<MetalinkHttpEntry>& result,
const std::shared_ptr<Option>& option) const;
#ifdef ENABLE_MESSAGE_DIGEST
// Returns all digests specified in Digest header field. Sort
// strong algorithm first. Strength is defined in MessageDigest. If
@ -133,6 +132,7 @@ public:
// different value, they are all ignored.
void getDigest(std::vector<Checksum>& result) const;
#endif // ENABLE_MESSAGE_DIGEST
};
} // namespace aria2

src/HttpResponseCommand.cc

@ -32,6 +32,7 @@
* files in the program, then also delete it here.
*/
/* copyright --> */
#include "HttpResponseCommand.h"
#include "DownloadEngine.h"
#include "DownloadContext.h"
@ -84,38 +85,37 @@
namespace aria2 {
namespace {
std::unique_ptr<StreamFilter> getTransferEncodingStreamFilter
(HttpResponse* httpResponse,
std::unique_ptr<StreamFilter> delegate = nullptr)
std::unique_ptr<StreamFilter> getTransferEncodingStreamFilter(
HttpResponse* httpResponse,
std::unique_ptr<StreamFilter> delegate = nullptr)
{
if(httpResponse->isTransferEncodingSpecified()) {
if (httpResponse->isTransferEncodingSpecified()) {
auto filter = httpResponse->getTransferEncodingStreamFilter();
if(!filter) {
throw DL_ABORT_EX
(fmt(EX_TRANSFER_ENCODING_NOT_SUPPORTED,
httpResponse->getTransferEncoding().c_str()));
if (!filter) {
throw DL_ABORT_EX(fmt(EX_TRANSFER_ENCODING_NOT_SUPPORTED,
httpResponse->getTransferEncoding().c_str()));
}
filter->init();
filter->installDelegate(std::move(delegate));
return filter;
}
return delegate;
}
} // namespace
namespace {
std::unique_ptr<StreamFilter> getContentEncodingStreamFilter
(HttpResponse* httpResponse,
std::unique_ptr<StreamFilter> delegate = nullptr)
std::unique_ptr<StreamFilter> getContentEncodingStreamFilter(
HttpResponse* httpResponse,
std::unique_ptr<StreamFilter> delegate = nullptr)
{
if(httpResponse->isContentEncodingSpecified()) {
if (httpResponse->isContentEncodingSpecified()) {
auto filter = httpResponse->getContentEncodingStreamFilter();
if(!filter) {
A2_LOG_INFO
(fmt("Content-Encoding %s is specified, but the current implementation"
"doesn't support it. The decoding process is skipped and the"
"downloaded content will be still encoded.",
httpResponse->getContentEncoding().c_str()));
if (!filter) {
A2_LOG_INFO(fmt("Content-Encoding %s is specified, but the current "
"implementation doesn't support it. The decoding "
"process is skipped and the downloaded content will be "
"still encoded.",
httpResponse->getContentEncoding().c_str()));
}
filter->init();
filter->installDelegate(std::move(delegate));
@ -123,17 +123,19 @@ std::unique_ptr<StreamFilter> getContentEncodingStreamFilter
}
return delegate;
}
} // namespace
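Both helpers above follow the same maybe-wrap shape: if the relevant header is present, build the outer filter, install the delegate, and return the chain; otherwise hand the delegate back untouched. A toy decorator illustrating that shape (the filters here are invented stand-ins, not aria2's StreamFilter classes):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <utility>

    struct Filter {
      virtual ~Filter() {}
      virtual std::string decode(const std::string& in) = 0;
    };

    struct Reverse : Filter { // pretend "content decoding"
      std::string decode(const std::string& in) {
        return std::string(in.rbegin(), in.rend());
      }
    };

    struct StripFirstChar : Filter { // pretend "transfer decoding" with a delegate
      std::unique_ptr<Filter> delegate;
      std::string decode(const std::string& in) {
        std::string rest = in.empty() ? in : in.substr(1);
        return delegate ? delegate->decode(rest) : rest;
      }
    };

    // Mirrors the shape of getTransferEncodingStreamFilter(): wrap the
    // delegate only when the header is present, otherwise pass it through.
    std::unique_ptr<Filter> maybeWrap(bool transferEncodingSpecified,
                                      std::unique_ptr<Filter> delegate) {
      if (!transferEncodingSpecified) {
        return delegate;
      }
      std::unique_ptr<StripFirstChar> outer(new StripFirstChar());
      outer->delegate = std::move(delegate);
      return std::move(outer);
    }

    int main() {
      std::unique_ptr<Filter> chain =
          maybeWrap(true, std::unique_ptr<Filter>(new Reverse()));
      std::cout << chain->decode("Xolleh") << '\n'; // prints "hello"
    }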
HttpResponseCommand::HttpResponseCommand
(cuid_t cuid,
const std::shared_ptr<Request>& req,
const std::shared_ptr<FileEntry>& fileEntry,
RequestGroup* requestGroup,
const std::shared_ptr<HttpConnection>& httpConnection,
DownloadEngine* e,
const std::shared_ptr<SocketCore>& s)
: AbstractCommand(cuid, req, fileEntry, requestGroup, e, s,
HttpResponseCommand::HttpResponseCommand(
cuid_t cuid,
const std::shared_ptr<Request>& req,
const std::shared_ptr<FileEntry>& fileEntry,
RequestGroup* requestGroup,
const std::shared_ptr<HttpConnection>& httpConnection,
DownloadEngine* e,
const std::shared_ptr<SocketCore>& s)
:
AbstractCommand(cuid, req, fileEntry, requestGroup, e, s,
httpConnection->getSocketRecvBuffer()),
httpConnection_(httpConnection)
{
@ -145,7 +147,7 @@ HttpResponseCommand::~HttpResponseCommand() {}
bool HttpResponseCommand::executeInternal()
{
auto httpResponse = httpConnection_->receiveResponse();
if(!httpResponse) {
if (!httpResponse) {
// The server has not responded to our request yet.
// For socket->wantRead() == true, setReadCheckSocket(socket) is already
// done in the constructor.
@ -153,7 +155,8 @@ bool HttpResponseCommand::executeInternal()
addCommandSelf();
return false;
}
// check HTTP status number
// check HTTP status code
httpResponse->validateResponse();
httpResponse->retrieveCookie();
@ -161,168 +164,175 @@ bool HttpResponseCommand::executeInternal()
// Disable persistent connection if:
// Connection: close is received or the remote server is not HTTP/1.1.
// We don't care whether non-HTTP/1.1 server returns Connection: keep-alive.
getRequest()->supportsPersistentConnection
(httpResponse->supportsPersistentConnection());
if(getRequest()->isPipeliningEnabled()) {
getRequest()->setMaxPipelinedRequest
(getOption()->getAsInt(PREF_MAX_HTTP_PIPELINING));
} else {
getRequest()->setMaxPipelinedRequest(1);
auto& req = getRequest();
req->supportsPersistentConnection(
httpResponse->supportsPersistentConnection());
if (req->isPipeliningEnabled()) {
req->setMaxPipelinedRequest(
getOption()->getAsInt(PREF_MAX_HTTP_PIPELINING));
}
else {
req->setMaxPipelinedRequest(1);
}
int statusCode = httpResponse->getStatusCode();
auto statusCode = httpResponse->getStatusCode();
auto& ctx = getDownloadContext();
auto grp = getRequestGroup();
auto& fe = getFileEntry();
if(statusCode == 304) {
if (statusCode == 304) {
int64_t totalLength = httpResponse->getEntityLength();
getFileEntry()->setLength(totalLength);
getRequestGroup()->initPieceStorage();
fe->setLength(totalLength);
grp->initPieceStorage();
getPieceStorage()->markAllPiecesDone();
// Just set checksum verification done.
getDownloadContext()->setChecksumVerified(true);
A2_LOG_NOTICE
(fmt(MSG_DOWNLOAD_ALREADY_COMPLETED,
GroupId::toHex(getRequestGroup()->getGID()).c_str(),
getRequestGroup()->getFirstFilePath().c_str()));
ctx->setChecksumVerified(true);
A2_LOG_NOTICE(fmt(MSG_DOWNLOAD_ALREADY_COMPLETED,
GroupId::toHex(grp->getGID()).c_str(),
grp->getFirstFilePath().c_str()));
poolConnection();
getFileEntry()->poolRequest(getRequest());
fe->poolRequest(req);
return true;
}
if(!getPieceStorage()) {
if (!getPieceStorage()) {
// Metalink/HTTP
if(getDownloadContext()->getAcceptMetalink()) {
if(httpHeader->defined(HttpHeader::LINK)) {
getDownloadContext()->setAcceptMetalink(false);
if (ctx->getAcceptMetalink()) {
if (httpHeader->defined(HttpHeader::LINK)) {
ctx->setAcceptMetalink(false);
std::vector<MetalinkHttpEntry> entries;
httpResponse->getMetalinKHttpEntries(entries, getOption());
for(const auto& e : entries) {
getFileEntry()->addUri(e.uri);
fe->addUri(e.uri);
A2_LOG_DEBUG(fmt("Adding URI=%s", e.uri.c_str()));
}
}
}
#ifdef ENABLE_MESSAGE_DIGEST
if(httpHeader->defined(HttpHeader::DIGEST)) {
if (httpHeader->defined(HttpHeader::DIGEST)) {
std::vector<Checksum> checksums;
httpResponse->getDigest(checksums);
for(const auto &checksum : checksums) {
if(getDownloadContext()->getHashType().empty()) {
if (ctx->getHashType().empty()) {
A2_LOG_DEBUG(fmt("Setting digest: type=%s, digest=%s",
checksum.getHashType().c_str(),
checksum.getDigest().c_str()));
getDownloadContext()->setDigest(checksum.getHashType(),
checksum.getDigest());
ctx->setDigest(checksum.getHashType(), checksum.getDigest());
break;
}
if (checkChecksum(ctx, checksum)) {
break;
} else {
if(checkChecksum(getDownloadContext(), checksum)) {
break;
}
}
}
}
#endif // ENABLE_MESSAGE_DIGEST
}
if(statusCode >= 300) {
if(statusCode == 404) {
getRequestGroup()->increaseAndValidateFileNotFoundCount();
if (statusCode >= 300) {
if (statusCode == 404) {
grp->increaseAndValidateFileNotFoundCount();
}
return skipResponseBody(std::move(httpResponse));
}
if(getFileEntry()->isUniqueProtocol()) {
if (fe->isUniqueProtocol()) {
// Redirection should be considered here. We need to parse
// original URI to get hostname.
const std::string& uri = getRequest()->getUri();
uri_split_result us;
if(uri_split(&us, uri.c_str()) == 0) {
if (uri_split(&us, uri.c_str()) == 0) {
std::string host = uri::getFieldString(us, USR_HOST, uri.c_str());
getFileEntry()->removeURIWhoseHostnameIs(host);
fe->removeURIWhoseHostnameIs(host);
}
}
if(!getPieceStorage()) {
getDownloadContext()->setAcceptMetalink(false);
if (!getPieceStorage()) {
ctx->setAcceptMetalink(false);
int64_t totalLength = httpResponse->getEntityLength();
getFileEntry()->setLength(totalLength);
if(getFileEntry()->getPath().empty()) {
getFileEntry()->setPath
(util::createSafePath
(getOption()->get(PREF_DIR), httpResponse->determinFilename()));
fe->setLength(totalLength);
if (fe->getPath().empty()) {
fe->setPath(util::createSafePath(getOption()->get(PREF_DIR),
httpResponse->determinFilename()));
}
getFileEntry()->setContentType(httpResponse->getContentType());
getRequestGroup()->preDownloadProcessing();
if(getDownloadEngine()->getRequestGroupMan()->
isSameFileBeingDownloaded(getRequestGroup())) {
throw DOWNLOAD_FAILURE_EXCEPTION2
(fmt(EX_DUPLICATE_FILE_DOWNLOAD,
getRequestGroup()->getFirstFilePath().c_str()),
error_code::DUPLICATE_DOWNLOAD);
fe->setContentType(httpResponse->getContentType());
grp->preDownloadProcessing();
if (getDownloadEngine()->getRequestGroupMan()->isSameFileBeingDownloaded(grp)) {
throw DOWNLOAD_FAILURE_EXCEPTION2(fmt(EX_DUPLICATE_FILE_DOWNLOAD,
grp->getFirstFilePath().c_str()),
error_code::DUPLICATE_DOWNLOAD);
}
// update last modified time
updateLastModifiedTime(httpResponse->getLastModifiedTime());
// If both transfer-encoding and total length is specified, we
// assume we can do segmented downloading
if(totalLength == 0 || shouldInflateContentEncoding(httpResponse.get())) {
if (totalLength == 0 || shouldInflateContentEncoding(httpResponse.get())) {
// we ignore content-length when inflate is required
getFileEntry()->setLength(0);
if(getRequest()->getMethod() == Request::METHOD_GET &&
(totalLength != 0 ||
!httpResponse->getHttpHeader()->defined(HttpHeader::CONTENT_LENGTH))){
fe->setLength(0);
if (req->getMethod() == Request::METHOD_GET && (totalLength != 0 ||
!httpResponse->getHttpHeader()->defined(HttpHeader::CONTENT_LENGTH))){
// DownloadContext::knowsTotalLength() == true only when
// server says the size of file is 0 explicitly.
getDownloadContext()->markTotalLengthIsUnknown();
}
return handleOtherEncoding(std::move(httpResponse));
} else {
return handleDefaultEncoding(std::move(httpResponse));
}
} else {
return handleDefaultEncoding(std::move(httpResponse));
}
#ifdef ENABLE_MESSAGE_DIGEST
if(!getDownloadContext()->getHashType().empty() &&
httpHeader->defined(HttpHeader::DIGEST)) {
std::vector<Checksum> checksums;
httpResponse->getDigest(checksums);
for(const auto &checksum : checksums) {
if(checkChecksum(getDownloadContext(), checksum)) {
break;
}
if (!ctx->getHashType().empty() && httpHeader->defined(HttpHeader::DIGEST)) {
std::vector<Checksum> checksums;
httpResponse->getDigest(checksums);
for (const auto &checksum : checksums) {
if (checkChecksum(ctx, checksum)) {
break;
}
}
#endif // ENABLE_MESSAGE_DIGEST
// validate totalsize
getRequestGroup()->validateTotalLength(getFileEntry()->getLength(),
httpResponse->getEntityLength());
// update last modified time
updateLastModifiedTime(httpResponse->getLastModifiedTime());
if(getRequestGroup()->getTotalLength() == 0) {
// Since total length is unknown, the file size in previously
// failed download could be larger than the size this time.
// Also we can't resume in this case too. So truncate the file
// anyway.
getPieceStorage()->getDiskAdaptor()->truncate(0);
auto teFilter = getTransferEncodingStreamFilter
(httpResponse.get(),
getContentEncodingStreamFilter(httpResponse.get()));
getDownloadEngine()->addCommand
(createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
} else {
auto teFilter = getTransferEncodingStreamFilter(httpResponse.get());
getDownloadEngine()->addCommand
(createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
}
return true;
}
#endif // ENABLE_MESSAGE_DIGEST
// validate totalsize
grp->validateTotalLength(fe->getLength(), httpResponse->getEntityLength());
// update last modified time
updateLastModifiedTime(httpResponse->getLastModifiedTime());
if (grp->getTotalLength() == 0) {
// Since total length is unknown, the file size in previously
// failed download could be larger than the size this time.
// Also we can't resume in this case too. So truncate the file
// anyway.
getPieceStorage()->getDiskAdaptor()->truncate(0);
auto teFilter = getTransferEncodingStreamFilter(
httpResponse.get(),
getContentEncodingStreamFilter(httpResponse.get()));
getDownloadEngine()->addCommand(
createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
}
else {
auto teFilter = getTransferEncodingStreamFilter(httpResponse.get());
getDownloadEngine()->addCommand(
createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
}
return true;
}
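One branch of executeInternal() above prunes mirrors for a "unique protocol" file entry: the original URI's host is extracted with uri_split() and every remaining URI on that host is removed. A rough standalone sketch, with a crude host extractor standing in for uri_split()/uri::getFieldString() and made-up mirror names:

    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    std::string hostOf(const std::string& uri) {
      std::string::size_type start = uri.find("://");
      if (start == std::string::npos) return "";
      start += 3;
      std::string::size_type end = uri.find_first_of("/:", start);
      return uri.substr(start, end == std::string::npos ? std::string::npos
                                                        : end - start);
    }

    int main() {
      std::vector<std::string> uris;
      uris.push_back("http://mirror-a.example.org/file");
      uris.push_back("http://mirror-a.example.org:8080/file");
      uris.push_back("http://mirror-b.example.org/file");

      // Drop every URI pointing at the host already in use.
      const std::string used = hostOf("http://mirror-a.example.org/file");
      uris.erase(std::remove_if(uris.begin(), uris.end(),
                                [&](const std::string& u) {
                                  return hostOf(u) == used;
                                }),
                 uris.end());
      for (size_t i = 0; i < uris.size(); ++i)
        std::cout << uris[i] << '\n'; // only mirror-b remains
    }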
void HttpResponseCommand::updateLastModifiedTime(const Time& lastModified)
{
if(getOption()->getAsBool(PREF_REMOTE_TIME)) {
if (getOption()->getAsBool(PREF_REMOTE_TIME)) {
getRequestGroup()->updateLastModifiedTime(lastModified);
}
}
bool HttpResponseCommand::shouldInflateContentEncoding
(HttpResponse* httpResponse)
bool HttpResponseCommand::shouldInflateContentEncoding(
HttpResponse* httpResponse)
{
// Basically, on the fly inflation cannot be made with segment
// download, because in each segment we don't know where the date
@ -336,11 +346,11 @@ bool HttpResponseCommand::shouldInflateContentEncoding
(ce == "gzip" || ce == "deflate");
}
bool HttpResponseCommand::handleDefaultEncoding
(std::unique_ptr<HttpResponse> httpResponse)
bool HttpResponseCommand::handleDefaultEncoding(
std::unique_ptr<HttpResponse> httpResponse)
{
auto progressInfoFile = std::make_shared<DefaultBtProgressInfoFile>
(getDownloadContext(), std::shared_ptr<PieceStorage>{}, getOption().get());
auto progressInfoFile = std::make_shared<DefaultBtProgressInfoFile>(
getDownloadContext(), std::shared_ptr<PieceStorage>{}, getOption().get());
getRequestGroup()->adjustFilename(progressInfoFile);
getRequestGroup()->initPieceStorage();
@ -350,9 +360,10 @@ bool HttpResponseCommand::handleDefaultEncoding
}
auto checkEntry = getRequestGroup()->createCheckIntegrityEntry();
if(!checkEntry) {
if (!checkEntry) {
return true;
}
File file(getRequestGroup()->getFirstFilePath());
// We have to make sure that command that has Request object must
// have segment after PieceStorage is initialized. See
@ -363,37 +374,40 @@ bool HttpResponseCommand::handleDefaultEncoding
// we can't continue to use this socket because server sends all entity
// body instead of a segment.
// Therefore, we shutdown the socket here if pipelining is enabled.
if(getRequest()->getMethod() == Request::METHOD_GET &&
if (getRequest()->getMethod() == Request::METHOD_GET &&
segment && segment->getPositionToWrite() == 0 &&
!getRequest()->isPipeliningEnabled()) {
auto teFilter = getTransferEncodingStreamFilter(httpResponse.get());
checkEntry->pushNextCommand
(createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
} else {
checkEntry->pushNextCommand(
createHttpDownloadCommand(std::move(httpResponse),
std::move(teFilter)));
}
else {
getSegmentMan()->cancelSegment(getCuid());
getFileEntry()->poolRequest(getRequest());
}
prepareForNextAction(std::move(checkEntry));
if(getRequest()->getMethod() == Request::METHOD_HEAD) {
if (getRequest()->getMethod() == Request::METHOD_HEAD) {
poolConnection();
getRequest()->setMethod(Request::METHOD_GET);
}
return true;
}
bool HttpResponseCommand::handleOtherEncoding
(std::unique_ptr<HttpResponse> httpResponse) {
bool HttpResponseCommand::handleOtherEncoding(
std::unique_ptr<HttpResponse> httpResponse)
{
// We assume that RequestGroup::getTotalLength() == 0 here
if(getOption()->getAsBool(PREF_DRY_RUN)) {
if (getOption()->getAsBool(PREF_DRY_RUN)) {
getRequestGroup()->initPieceStorage();
onDryRunFileFound();
return true;
}
if(getRequest()->getMethod() == Request::METHOD_HEAD) {
if (getRequest()->getMethod() == Request::METHOD_HEAD) {
poolConnection();
getRequest()->setMethod(Request::METHOD_GET);
return prepareForRetry(0);
@ -402,38 +416,37 @@ bool HttpResponseCommand::handleOtherEncoding
// In this context, knowsTotalLength() is true only when the file is
// really zero-length.
auto streamFilter = getTransferEncodingStreamFilter
(httpResponse.get(), getContentEncodingStreamFilter(httpResponse.get()));
auto streamFilter = getTransferEncodingStreamFilter(
httpResponse.get(), getContentEncodingStreamFilter(httpResponse.get()));
// If chunked transfer-encoding is specified, we have to read end of
// chunk markers(0\r\n\r\n, for example).
bool chunkedUsed = streamFilter &&
streamFilter->getName() == ChunkedDecodingStreamFilter::NAME;
// For zero-length file, check existing file comparing its size
if(!chunkedUsed && getDownloadContext()->knowsTotalLength() &&
getRequestGroup()->downloadFinishedByFileLength()) {
if (!chunkedUsed && getDownloadContext()->knowsTotalLength() &&
getRequestGroup()->downloadFinishedByFileLength()) {
getRequestGroup()->initPieceStorage();
#ifdef ENABLE_MESSAGE_DIGEST
// TODO Known issue: if .aria2 file exists, it will not be deleted
// on successful verification, because .aria2 file is not loaded.
// See also FtpNegotiationCommand::onFileSizeDetermined()
if(getDownloadContext()->isChecksumVerificationNeeded()) {
A2_LOG_DEBUG("Zero length file exists. Verify checksum.");
auto entry = make_unique<ChecksumCheckIntegrityEntry>
(getRequestGroup());
auto entry = make_unique<ChecksumCheckIntegrityEntry>(getRequestGroup());
entry->initValidator();
getPieceStorage()->getDiskAdaptor()->openExistingFile();
getDownloadEngine()->getCheckIntegrityMan()->pushEntry(std::move(entry));
} else
#endif // ENABLE_MESSAGE_DIGEST
{
getPieceStorage()->markAllPiecesDone();
getDownloadContext()->setChecksumVerified(true);
A2_LOG_NOTICE
(fmt(MSG_DOWNLOAD_ALREADY_COMPLETED,
GroupId::toHex(getRequestGroup()->getGID()).c_str(),
getRequestGroup()->getFirstFilePath().c_str()));
}
{
getPieceStorage()->markAllPiecesDone();
getDownloadContext()->setChecksumVerified(true);
A2_LOG_NOTICE(fmt(MSG_DOWNLOAD_ALREADY_COMPLETED,
GroupId::toHex(getRequestGroup()->getGID()).c_str(),
getRequestGroup()->getFirstFilePath().c_str()));
}
poolConnection();
return true;
}
@ -445,55 +458,55 @@ bool HttpResponseCommand::handleOtherEncoding
// Local file size becomes zero when DiskAdaptor::initAndOpenFile()
// is called. So zero-length file is complete if chunked encoding is
// not used.
if(!chunkedUsed && getDownloadContext()->knowsTotalLength()) {
if (!chunkedUsed && getDownloadContext()->knowsTotalLength()) {
A2_LOG_DEBUG("File length becomes zero and it means download completed.");
// TODO Known issue: if .aria2 file exists, it will not be deleted
// on successful verification, because .aria2 file is not loaded.
// See also FtpNegotiationCommand::onFileSizeDetermined()
#ifdef ENABLE_MESSAGE_DIGEST
if(getDownloadContext()->isChecksumVerificationNeeded()) {
if (getDownloadContext()->isChecksumVerificationNeeded()) {
A2_LOG_DEBUG("Verify checksum for zero-length file");
auto entry = make_unique<ChecksumCheckIntegrityEntry>
(getRequestGroup());
auto entry = make_unique<ChecksumCheckIntegrityEntry>(getRequestGroup());
entry->initValidator();
getDownloadEngine()->getCheckIntegrityMan()->pushEntry(std::move(entry));
} else
#endif // ENABLE_MESSAGE_DIGEST
{
getRequestGroup()->getPieceStorage()->markAllPiecesDone();
}
{
getRequestGroup()->getPieceStorage()->markAllPiecesDone();
}
poolConnection();
return true;
}
// We have to make sure that command that has Request object must
// have segment after PieceStorage is initialized. See
// AbstractCommand::execute()
getSegmentMan()->getSegmentWithIndex(getCuid(), 0);
getDownloadEngine()->addCommand
(createHttpDownloadCommand(std::move(httpResponse),
std::move(streamFilter)));
getDownloadEngine()->addCommand(
createHttpDownloadCommand(std::move(httpResponse),
std::move(streamFilter)));
return true;
}
bool HttpResponseCommand::skipResponseBody
(std::unique_ptr<HttpResponse> httpResponse)
bool HttpResponseCommand::skipResponseBody(
std::unique_ptr<HttpResponse> httpResponse)
{
auto filter = getTransferEncodingStreamFilter(httpResponse.get());
// We don't use Content-Encoding here because this response body is just
// thrown away.
auto httpResponsePtr = httpResponse.get();
auto command = make_unique<HttpSkipResponseCommand>
(getCuid(), getRequest(), getFileEntry(), getRequestGroup(),
httpConnection_, std::move(httpResponse),
getDownloadEngine(), getSocket());
auto command = make_unique<HttpSkipResponseCommand>(
getCuid(), getRequest(), getFileEntry(), getRequestGroup(),
httpConnection_, std::move(httpResponse), getDownloadEngine(),
getSocket());
command->installStreamFilter(std::move(filter));
// If request method is HEAD or the response body is zero-length,
// set command's status to real time so that avoid read check blocking
if(getRequest()->getMethod() == Request::METHOD_HEAD ||
(httpResponsePtr->getEntityLength() == 0 &&
!httpResponsePtr->isTransferEncodingSpecified())) {
if (getRequest()->getMethod() == Request::METHOD_HEAD ||
(httpResponsePtr->getEntityLength() == 0 &&
!httpResponsePtr->isTransferEncodingSpecified())) {
command->setStatusRealtime();
// If entity length == 0, then socket read/write check must be disabled.
command->disableSocketCheck();
@ -505,50 +518,52 @@ bool HttpResponseCommand::skipResponseBody
}
namespace {
bool decideFileAllocation(StreamFilter* filter)
{
#ifdef HAVE_ZLIB
for(StreamFilter* f = filter; f; f = f->getDelegate().get()){
for (StreamFilter* f = filter; f; f = f->getDelegate().get()){
// Since the compressed file's length are returned in the response header
// and the decompressed file size is unknown at this point, disable file
// allocation here.
if(f->getName() == GZipDecodingStreamFilter::NAME) {
if (f->getName() == GZipDecodingStreamFilter::NAME) {
return false;
}
}
#endif // HAVE_ZLIB
return true;
}
} // namespace
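decideFileAllocation() above walks the filter chain through getDelegate() and disables preallocation when a gzip decoder is anywhere in the chain, because Content-Length then describes the compressed body. The same walk over a toy chain (the node type is a stand-in, only the filter names are taken from the diff):

    #include <iostream>
    #include <memory>
    #include <string>

    struct Node {
      std::string name;
      std::unique_ptr<Node> delegate;
      Node(const std::string& n) : name(n) {}
    };

    // Follow the delegate pointers and report whether a named filter appears.
    bool chainContains(const Node* head, const std::string& wanted) {
      for (const Node* f = head; f; f = f->delegate.get()) {
        if (f->name == wanted) return true;
      }
      return false;
    }

    int main() {
      // chunked -> gzip, the combination built by handleOtherEncoding().
      std::unique_ptr<Node> chain(new Node("ChunkedDecodingStreamFilter"));
      chain->delegate.reset(new Node("GZipDecodingStreamFilter"));
      std::cout << std::boolalpha
                << chainContains(chain.get(), "GZipDecodingStreamFilter")
                << '\n'; // true: skip file allocation
    }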
std::unique_ptr<HttpDownloadCommand>
HttpResponseCommand::createHttpDownloadCommand
(std::unique_ptr<HttpResponse> httpResponse,
std::unique_ptr<StreamFilter> filter)
HttpResponseCommand::createHttpDownloadCommand(
std::unique_ptr<HttpResponse> httpResponse,
std::unique_ptr<StreamFilter> filter)
{
auto command = make_unique<HttpDownloadCommand>
(getCuid(), getRequest(), getFileEntry(),
getRequestGroup(),
std::move(httpResponse), httpConnection_,
getDownloadEngine(), getSocket());
auto command = make_unique<HttpDownloadCommand>(
getCuid(), getRequest(), getFileEntry(), getRequestGroup(),
std::move(httpResponse), httpConnection_, getDownloadEngine(),
getSocket());
command->setStartupIdleTime(getOption()->getAsInt(PREF_STARTUP_IDLE_TIME));
command->setLowestDownloadSpeedLimit
(getOption()->getAsInt(PREF_LOWEST_SPEED_LIMIT));
if(getRequestGroup()->isFileAllocationEnabled() &&
!decideFileAllocation(filter.get())) {
command->setLowestDownloadSpeedLimit(
getOption()->getAsInt(PREF_LOWEST_SPEED_LIMIT));
if (getRequestGroup()->isFileAllocationEnabled() &&
!decideFileAllocation(filter.get())) {
getRequestGroup()->setFileAllocationEnabled(false);
}
command->installStreamFilter(std::move(filter));
getRequestGroup()->getURISelector()->tuneDownloadCommand
(getFileEntry()->getRemainingUris(), command.get());
getRequestGroup()->getURISelector()->tuneDownloadCommand(
getFileEntry()->getRemainingUris(), command.get());
return std::move(command);
}
void HttpResponseCommand::poolConnection()
{
if(getRequest()->supportsPersistentConnection()) {
if (getRequest()->supportsPersistentConnection()) {
getDownloadEngine()->poolSocket(getRequest(), createProxyRequest(),
getSocket());
}
@ -562,18 +577,17 @@ void HttpResponseCommand::onDryRunFileFound()
}
#ifdef ENABLE_MESSAGE_DIGEST
bool HttpResponseCommand::checkChecksum
(const std::shared_ptr<DownloadContext>& dctx,
const Checksum& checksum)
bool HttpResponseCommand::checkChecksum(
const std::shared_ptr<DownloadContext>& dctx, const Checksum& checksum)
{
if(dctx->getHashType() == checksum.getHashType()) {
if(dctx->getDigest() == checksum.getDigest()) {
A2_LOG_INFO("Valid hash found in Digest header field.");
return true;
} else {
if (dctx->getHashType() == checksum.getHashType()) {
if (dctx->getDigest() != checksum.getDigest()) {
throw DL_ABORT_EX("Invalid hash found in Digest header field.");
}
A2_LOG_INFO("Valid hash found in Digest header field.");
return true;
}
return false;
}
#endif // ENABLE_MESSAGE_DIGEST
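checkChecksum() above has three outcomes: same type and digest means verified, same type with a different digest is a hard error, and a different type is no decision at all. A compact model using a plain enum in place of the DL_ABORT_EX exception:

    #include <iostream>
    #include <string>

    enum CheckResult { VERIFIED, MISMATCH, NOT_COMPARABLE };

    CheckResult check(const std::string& knownType, const std::string& knownDigest,
                      const std::string& gotType, const std::string& gotDigest) {
      if (knownType != gotType) return NOT_COMPARABLE; // nothing to compare
      return knownDigest == gotDigest ? VERIFIED : MISMATCH;
    }

    int main() {
      std::cout << check("sha-256", "abc", "sha-256", "abc") << '\n'; // 0: VERIFIED
      std::cout << check("sha-256", "abc", "sha-256", "xyz") << '\n'; // 1: MISMATCH
      std::cout << check("sha-256", "abc", "md5", "zzz") << '\n';     // 2: NOT_COMPARABLE
    }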

src/HttpResponseCommand.h

@ -68,9 +68,8 @@ private:
bool skipResponseBody(std::unique_ptr<HttpResponse> httpResponse);
std::unique_ptr<HttpDownloadCommand>
createHttpDownloadCommand
(std::unique_ptr<HttpResponse> httpResponse,
std::unique_ptr<StreamFilter> streamFilter);
createHttpDownloadCommand(std::unique_ptr<HttpResponse> httpResponse,
std::unique_ptr<StreamFilter> streamFilter);
void updateLastModifiedTime(const Time& lastModified);
@ -81,10 +80,10 @@ private:
// Returns true if dctx and checksum has same hash type and hash
// value. If they have same hash type but different hash value,
// throws exception. Otherwise returns false.
bool checkChecksum
(const std::shared_ptr<DownloadContext>& dctx,
const Checksum& checksum);
bool checkChecksum(const std::shared_ptr<DownloadContext>& dctx,
const Checksum& checksum);
#endif // ENABLE_MESSAGE_DIGEST
protected:
bool executeInternal();