2010-07-11 Tatsuhiro Tsujikawa <t-tujikawa@users.sourceforge.net>

Fixed the bug that segments are not filled to
	Request::getMaxPipelinedRequest().
	Make sure that trailing data of transfer encoding is read properly,
	after file data is received.
	* src/AbstractCommand.cc
	* src/DownloadCommand.cc
	* src/DownloadCommand.h
	* src/FtpDownloadCommand.cc
	* src/FtpDownloadCommand.h
	* src/HttpDownloadCommand.cc
	* src/HttpDownloadCommand.h
pull/1/head
Tatsuhiro Tsujikawa 2010-07-11 14:21:17 +00:00
parent f98a180323
commit f3b097b5af
8 changed files with 82 additions and 15 deletions

View File

@ -1,3 +1,17 @@
2010-07-11 Tatsuhiro Tsujikawa <t-tujikawa@users.sourceforge.net>
Fixed the bug that segments are not filled to
Request::getMaxPipelinedRequest().
Make sure that trailing data of transfer encoding is read properly,
after file data is received.
* src/AbstractCommand.cc
* src/DownloadCommand.cc
* src/DownloadCommand.h
* src/FtpDownloadCommand.cc
* src/FtpDownloadCommand.h
* src/HttpDownloadCommand.cc
* src/HttpDownloadCommand.h
2010-07-11 Tatsuhiro Tsujikawa <t-tujikawa@users.sourceforge.net> 2010-07-11 Tatsuhiro Tsujikawa <t-tujikawa@users.sourceforge.net>
After change request to faster one, wait at least 10 seconds. After change request to faster one, wait at least 10 seconds.

View File

@ -160,26 +160,43 @@ bool AbstractCommand::execute() {
segments_.clear(); segments_.clear();
getSegmentMan()->getInFlightSegment(segments_, getCuid()); getSegmentMan()->getInFlightSegment(segments_, getCuid());
if(!req_.isNull() && segments_.empty()) { if(!req_.isNull() && segments_.empty()) {
// TODO make this out side of socket check if.
// This command previously has assigned segments, but it is // This command previously has assigned segments, but it is
// canceled. So discard current request chain. // canceled. So discard current request chain. Plus, if no
// segment is available when http pipelining is used.
if(getLogger()->debug()) { if(getLogger()->debug()) {
getLogger()->debug("CUID#%s - It seems previously assigned segments" getLogger()->debug("CUID#%s - It seems previously assigned segments"
" are canceled. Restart.", " are canceled. Restart.",
util::itos(getCuid()).c_str()); util::itos(getCuid()).c_str());
} }
// Request::isPipeliningEnabled() == true means aria2
// accessed the remote server and discovered that the server
// supports pipelining.
if(!req_.isNull() && req_->isPipeliningEnabled()) {
e_->poolSocket(req_, createProxyRequest(), socket_);
}
return prepareForRetry(0); return prepareForRetry(0);
} }
if(req_.isNull() || req_->getMaxPipelinedRequest() == 1 || if(req_.isNull() || req_->getMaxPipelinedRequest() == 1 ||
// Why the following condition is necessary? That's because
// For single file download, SegmentMan::getSegment(cuid)
// is more efficient.
getDownloadContext()->getFileEntries().size() == 1) { getDownloadContext()->getFileEntries().size() == 1) {
if(segments_.empty()) { size_t maxSegments = req_.isNull()?1:req_->getMaxPipelinedRequest();
while(segments_.size() < maxSegments) {
SharedHandle<Segment> segment = SharedHandle<Segment> segment =
getSegmentMan()->getSegment(getCuid()); getSegmentMan()->getSegment(getCuid());
if(!segment.isNull()) { if(segment.isNull()) {
break;
} else {
segments_.push_back(segment); segments_.push_back(segment);
} }
} }
if(segments_.empty()) { if(segments_.empty()) {
// TODO socket could be pooled here if pipelining is enabled... // TODO socket could be pooled here if pipelining is
// enabled... Hmm, I don't think this path is reached when
// pipelining is enabled.
if(getLogger()->info()) { if(getLogger()->info()) {
getLogger()->info(MSG_NO_SEGMENT_AVAILABLE, getLogger()->info(MSG_NO_SEGMENT_AVAILABLE,
util::itos(getCuid()).c_str()); util::itos(getCuid()).c_str());

View File

@ -135,6 +135,17 @@ bool DownloadCommand::executeInternal() {
} else { } else {
bufSize = BUFSIZE; bufSize = BUFSIZE;
} }
// It is possible that segment is completed but we have some bytes
// of stream to read. For example, chunked encoding has "0"+CRLF
// after data. After we read data(at this moment segment is
// completed), we need another 3bytes(or more if it has extension).
if(bufSize == 0 &&
((!transferEncodingDecoder_.isNull() &&
!transferEncodingDecoder_->finished()) ||
(!contentEncodingDecoder_.isNull() &&
!contentEncodingDecoder_->finished()))) {
bufSize = 1;
}
getSocket()->readData(buf_, bufSize); getSocket()->readData(buf_, bufSize);
const SharedHandle<DiskAdaptor>& diskAdaptor = const SharedHandle<DiskAdaptor>& diskAdaptor =
@ -187,11 +198,20 @@ bool DownloadCommand::executeInternal() {
!getSocket()->wantRead() && !getSocket()->wantWrite()) { !getSocket()->wantRead() && !getSocket()->wantWrite()) {
segmentPartComplete = true; segmentPartComplete = true;
} }
} else if(!transferEncodingDecoder_.isNull() && } else {
off_t loff = getFileEntry()->gtoloff(segment->getPositionToWrite());
if(!transferEncodingDecoder_.isNull() &&
((loff == getRequestEndOffset() && transferEncodingDecoder_->finished())
|| loff < getRequestEndOffset()) &&
(segment->complete() || (segment->complete() ||
segment->getPositionToWrite() == getFileEntry()->getLastOffset())) { segment->getPositionToWrite() == getFileEntry()->getLastOffset())) {
// In this case, transferEncodingDecoder is used and // In this case, transferEncodingDecoder is used and
// Content-Length is known. // Content-Length is known. We check
// transferEncodingDecoder_->finished() only if the requested
// end offset equals the written position in file local offset;
// in other words, all data in the requested range is received.
// If requested end offset is greater than this segment, then
// transferEncodingDecoder_ is not finished in this segment.
segmentPartComplete = true; segmentPartComplete = true;
} else if((transferEncodingDecoder_.isNull() || } else if((transferEncodingDecoder_.isNull() ||
transferEncodingDecoder_->finished()) && transferEncodingDecoder_->finished()) &&
@ -199,6 +219,7 @@ bool DownloadCommand::executeInternal() {
contentEncodingDecoder_->finished())) { contentEncodingDecoder_->finished())) {
segmentPartComplete = true; segmentPartComplete = true;
} }
}
if(!segmentPartComplete && bufSize == 0 && if(!segmentPartComplete && bufSize == 0 &&
!getSocket()->wantRead() && !getSocket()->wantWrite()) { !getSocket()->wantRead() && !getSocket()->wantWrite()) {

View File

@ -74,6 +74,9 @@ protected:
virtual bool executeInternal(); virtual bool executeInternal();
virtual bool prepareForNextSegment(); virtual bool prepareForNextSegment();
// This is file local offset
virtual off_t getRequestEndOffset() const = 0;
public: public:
DownloadCommand(cuid_t cuid, DownloadCommand(cuid_t cuid,
const SharedHandle<Request>& req, const SharedHandle<Request>& req,

View File

@ -88,4 +88,9 @@ bool FtpDownloadCommand::prepareForNextSegment()
} }
} }
off_t FtpDownloadCommand::getRequestEndOffset() const
{
  // An FTP transfer always runs to the end of the file, so the
  // file-local end offset of the request is the file's total length.
  off_t endOffset = getFileEntry()->getLength();
  return endOffset;
}
} // namespace aria2 } // namespace aria2

View File

@ -48,6 +48,7 @@ private:
SharedHandle<SocketCore> ctrlSocket_; SharedHandle<SocketCore> ctrlSocket_;
protected: protected:
virtual bool prepareForNextSegment(); virtual bool prepareForNextSegment();
virtual off_t getRequestEndOffset() const;
public: public:
FtpDownloadCommand(cuid_t cuid, FtpDownloadCommand(cuid_t cuid,
const SharedHandle<Request>& req, const SharedHandle<Request>& req,

View File

@ -129,4 +129,9 @@ bool HttpDownloadCommand::prepareForNextSegment() {
} }
} }
off_t HttpDownloadCommand::getRequestEndOffset() const
{
  // The HTTP Range/Content-Range end byte is inclusive, so add 1 to
  // obtain the exclusive file-local end offset of the requested range.
  off_t lastByte = httpResponse_->getHttpHeader()->getRange()->getEndByte();
  return lastByte+1;
}
} // namespace aria2 } // namespace aria2

View File

@ -48,6 +48,7 @@ private:
SharedHandle<HttpConnection> httpConnection_; SharedHandle<HttpConnection> httpConnection_;
protected: protected:
virtual bool prepareForNextSegment(); virtual bool prepareForNextSegment();
virtual off_t getRequestEndOffset() const;
public: public:
HttpDownloadCommand(cuid_t cuid, HttpDownloadCommand(cuid_t cuid,
const SharedHandle<Request>& req, const SharedHandle<Request>& req,