mirror of https://github.com/aria2/aria2
2007-06-09 Tatsuhiro Tsujikawa <tujikawa at rednoah dot com>
Added -j command-line option.
* src/OptionHandlerFactory.cc (createOptionHandlers)
* src/main.cc

* src/ByteArrayDiskWriter.h, src/ByteArrayDiskWriter.cc
  Rewritten using stringstream.
* src/TrackerUpdateCommand.h, src/TrackerUpdateCommand.cc
  Rewritten using stringstream.
parent c7fbedfa0a
commit 2d522cf6b7
ChangeLog (11 lines changed)
@@ -1,3 +1,14 @@
+2007-06-09 Tatsuhiro Tsujikawa <tujikawa at rednoah dot com>
+
+        Added -j command-line option.
+        * src/OptionHandlerFactory.cc (createOptionHandlers)
+        * src/main.cc
+
+        * src/ByteArrayDiskWriter.h, src/ByteArrayDiskWriter.cc
+        Rewritten using stringstream.
+        * src/TrackerUpdateCommand.h, src/TrackerUpdateCommand.cc
+        Rewritten using stringstream.
+
 2007-06-05 Tatsuhiro Tsujikawa <tujikawa at rednoah dot com>
 
         Make download size shown in MB, KB.
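To make the new option easier to follow across the hunks below, here is a minimal, self-contained C++ sketch of how -j / --max-concurrent-downloads travels from getopt_long into the configured limit. It is an illustration, not the committed code: aria2's Option and OptionHandler classes are reduced to a plain string map, and only the pieces relevant to -j are kept. The preference key "max-concurrent-downloads" and the default of 5 are taken from the diff itself.

// Hypothetical sketch of the -j wiring shown in the hunks below.
// "op" stands in for aria2's Option object, which is more than a map.
#include <getopt.h>
#include <cstdlib>
#include <iostream>
#include <map>
#include <string>

#define PREF_MAX_CONCURRENT_DOWNLOADS "max-concurrent-downloads"  // same key as src/prefs.h

int main(int argc, char* argv[]) {
  std::map<std::string, std::string> op;
  op[PREF_MAX_CONCURRENT_DOWNLOADS] = "5";                // default, as set in main.cc

  const struct option longOpts[] = {
    { "max-concurrent-downloads", required_argument, 0, 'j' },
    { 0, 0, 0, 0 }
  };
  int c;
  while((c = getopt_long(argc, argv, "j:", longOpts, 0)) != -1) {
    if(c == 'j') {
      op[PREF_MAX_CONCURRENT_DOWNLOADS] = optarg;         // the real code streams this into cmdstream
    }
  }
  int maxConcurrent = std::atoi(op[PREF_MAX_CONCURRENT_DOWNLOADS].c_str());
  std::cout << "max concurrent downloads: " << maxConcurrent << std::endl;
  return 0;
}

In the committed patch the parsed value ends up in RequestGroupMan, which caps how many RequestGroups run at once, so a call such as aria2c -j 2 -i uris.txt (uris.txt being any input file) starts at most two downloads and queues the rest.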
TODO (4 lines changed)
@@ -21,12 +21,8 @@
 * Fix DefaultBtProgressInfoFile.cc: save(), load()
 * remove blockIndex
 * Add seed mode.
 
-* Rewrite ByteArrayDiskWriter, TrackerUpdateCommand with stringstream
-* Make trakcerwatchercommand and trackerUploadCommand posses requestGroup
-* consider life cycle of requestGroup and segmentMan
-
 
 * exit status: all downloads have been successful-> EXIT_SUCCESS,
   some of downloads have been failed -> EXIT_FAILURE
 
(file name not preserved in this capture)
@@ -9,7 +9,7 @@
 # General Public License and is *not* in the public domain.
 
 PACKAGE = aria2c
-VERSION = 0.10.2+1
+VERSION = 0.11.0
 
 SHELL = /bin/sh
 
src/ByteArrayDiskWriter.cc
@@ -35,7 +35,7 @@
 #include "ByteArrayDiskWriter.h"
 #include "Util.h"
 
-ByteArrayDiskWriter::ByteArrayDiskWriter():buf(0) {
+ByteArrayDiskWriter::ByteArrayDiskWriter() {
 }
 
 ByteArrayDiskWriter::~ByteArrayDiskWriter() {
@@ -43,20 +43,12 @@ ByteArrayDiskWriter::~ByteArrayDiskWriter() {
 }
 
 void ByteArrayDiskWriter::clear() {
-  delete [] buf;
-  buf = 0;
-}
-
-void ByteArrayDiskWriter::init() {
-  maxBufLength = 256;
-  buf = new char[maxBufLength];
-  bufLength = 0;
+  buf.str("");
 }
 
 void ByteArrayDiskWriter::initAndOpenFile(const string& filename,
                                           int64_t totalLength) {
   clear();
-  init();
 }
 
 void ByteArrayDiskWriter::openFile(const string& filename,
@@ -74,24 +66,21 @@ void ByteArrayDiskWriter::openExistingFile(const string& filename,
 }
 
 void ByteArrayDiskWriter::writeData(const char* data, int32_t dataLength, int64_t position) {
-  if(bufLength+dataLength >= maxBufLength) {
-    maxBufLength = Util::expandBuffer(&buf, bufLength, bufLength+dataLength);
+  if(size() < position) {
+    buf.seekg(0, ios_base::end);
+    for(int32_t i = size(); i < position; ++i) {
+      buf.put('\0');
+    }
+  } else {
+    buf.seekg(position, ios_base::beg);
   }
-  memcpy(buf+bufLength, data, dataLength);
-  bufLength += dataLength;
+  buf.write(data, dataLength);
 }
 
 int ByteArrayDiskWriter::readData(char* data, int32_t len, int64_t position) {
-  if(position >= bufLength) {
-    return 0;
-  }
-  int32_t readLength;
-  if(position+len <= bufLength) {
-    readLength = len;
-  } else {
-    readLength = bufLength-position;
-  }
-  memcpy(data, buf+position, readLength);
-  return readLength;
+  buf.seekg(position, ios_base::beg);
+  buf.read(data, len);
+  // TODO we have to call buf.clear() here? YES
+  buf.clear();
+  return buf.gcount();
 }
 
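A note on the "// TODO we have to call buf.clear() here? YES" line in the new readData() above: when read() runs out of characters before len bytes, the stringstream sets eofbit and failbit. gcount() still reports how many bytes were actually extracted, but any later seek, read or write would fail until clear() resets the state. A standalone demonstration with a plain std::stringstream (not the ByteArrayDiskWriter class itself):

#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::stringstream buf;
  buf << "Hello world!";                          // 12 bytes in the buffer

  char out[100];
  buf.seekg(0, std::ios_base::beg);
  buf.read(out, sizeof(out));                     // asks for 100, only 12 available
  std::cout << "gcount: " << buf.gcount()         // 12
            << ", failed: " << buf.fail()         // 1: eof reached, stream is in a failed state
            << std::endl;

  buf.clear();                                    // drop eofbit/failbit, as readData() does
  buf.seekg(0, std::ios_base::beg);               // this seek works again now
  buf.read(out, 5);
  std::cout << std::string(out, 5) << std::endl;  // "Hello"
  return 0;
}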
src/ByteArrayDiskWriter.h
@@ -36,14 +36,12 @@
 #define _D_BYTE_ARRAY_DISK_WRITER_H_
 
 #include "DiskWriter.h"
+#include <sstream>
 
 class ByteArrayDiskWriter : public DiskWriter {
 private:
-  char* buf;
-  int32_t maxBufLength;
-  int32_t bufLength;
+  stringstream buf;
 
-  void init();
   void clear();
 public:
   ByteArrayDiskWriter();
@@ -66,7 +64,7 @@ public:
 
   virtual int64_t size() const
   {
-    return bufLength;
+    return buf.str().size();
   }
 
   // not implemented yet
@@ -76,15 +74,6 @@ public:
     return "";
   }
 #endif // ENABLE_MESSAGE_DIGEST
 
-  const char* getByteArray() const {
-    return buf;
-  }
-
-  // can be deleted. Use size() instead.
-  int getByteArrayLength() const {
-    return bufLength;
-  }
 };
 
 #endif // _D_BYTE_ARRAY_DISK_WRITER_H_
src/DownloadEngineFactory.cc
@@ -72,9 +72,9 @@ DownloadEngineFactory::newConsoleEngine(const Option* op,
   }
   RequestGroups workingSet;
   RequestGroups reservedSet;
-  if(op->getAsInt(PREF_MAX_SIMULTANEOUS_DOWNLOADS) < (int32_t)requestGroups.size()) {
-    copy(requestGroups.begin(), requestGroups.begin()+op->getAsInt(PREF_MAX_SIMULTANEOUS_DOWNLOADS), back_inserter(workingSet));
-    copy(requestGroups.begin()+op->getAsInt(PREF_MAX_SIMULTANEOUS_DOWNLOADS),
+  if(op->getAsInt(PREF_MAX_CONCURRENT_DOWNLOADS) < (int32_t)requestGroups.size()) {
+    copy(requestGroups.begin(), requestGroups.begin()+op->getAsInt(PREF_MAX_CONCURRENT_DOWNLOADS), back_inserter(workingSet));
+    copy(requestGroups.begin()+op->getAsInt(PREF_MAX_CONCURRENT_DOWNLOADS),
         requestGroups.end(), back_inserter(reservedSet));
   } else {
     workingSet = requestGroups;
@@ -83,7 +83,7 @@ DownloadEngineFactory::newConsoleEngine(const Option* op,
   ConsoleDownloadEngine* e = new ConsoleDownloadEngine();
   e->option = op;
   RequestGroupManHandle requestGroupMan = new RequestGroupMan(workingSet,
-                                                              op->getAsInt(PREF_MAX_SIMULTANEOUS_DOWNLOADS));
+                                                              op->getAsInt(PREF_MAX_CONCURRENT_DOWNLOADS));
   requestGroupMan->addReservedGroup(reservedSet);
   e->_requestGroupMan = requestGroupMan;
   e->_fileAllocationMan = new FileAllocationMan();
src/Makefile.am
@@ -54,7 +54,6 @@ SRCS = Socket.h\
        FeatureConfig.cc FeatureConfig.h\
        DownloadEngineFactory.cc DownloadEngineFactory.h\
        RequestInfo.h\
-       UrlRequestInfo.cc UrlRequestInfo.h\
        SpeedCalc.cc SpeedCalc.h\
        PeerStat.h\
        BitfieldMan.cc BitfieldMan.h\
src/Makefile.in
@@ -208,10 +208,10 @@ am__libaria2c_a_SOURCES_DIST = Socket.h SocketCore.cc SocketCore.h \
        LogFactory.h NullLogger.h TimeA2.cc TimeA2.h SharedHandle.h \
        HandleRegistry.h FeatureConfig.cc FeatureConfig.h \
        DownloadEngineFactory.cc DownloadEngineFactory.h RequestInfo.h \
-       UrlRequestInfo.cc UrlRequestInfo.h SpeedCalc.cc SpeedCalc.h \
-       PeerStat.h BitfieldMan.cc BitfieldMan.h BitfieldManFactory.cc \
-       BitfieldManFactory.h Randomizer.h SimpleRandomizer.cc \
-       SimpleRandomizer.h FileAllocator.h FileAllocationMonitor.cc \
+       SpeedCalc.cc SpeedCalc.h PeerStat.h BitfieldMan.cc \
+       BitfieldMan.h BitfieldManFactory.cc BitfieldManFactory.h \
+       Randomizer.h SimpleRandomizer.cc SimpleRandomizer.h \
+       FileAllocator.h FileAllocationMonitor.cc \
        FileAllocationMonitor.h ConsoleFileAllocationMonitor.cc \
        ConsoleFileAllocationMonitor.h ChunkChecksumValidator.cc \
        ChunkChecksumValidator.h HttpResponse.cc HttpResponse.h \
@@ -403,10 +403,9 @@ am__objects_3 = SocketCore.$(OBJEXT) Command.$(OBJEXT) \
        DefaultDiskWriter.$(OBJEXT) File.$(OBJEXT) Option.$(OBJEXT) \
        Base64.$(OBJEXT) CookieBox.$(OBJEXT) LogFactory.$(OBJEXT) \
        TimeA2.$(OBJEXT) FeatureConfig.$(OBJEXT) \
-       DownloadEngineFactory.$(OBJEXT) UrlRequestInfo.$(OBJEXT) \
-       SpeedCalc.$(OBJEXT) BitfieldMan.$(OBJEXT) \
-       BitfieldManFactory.$(OBJEXT) SimpleRandomizer.$(OBJEXT) \
-       FileAllocationMonitor.$(OBJEXT) \
+       DownloadEngineFactory.$(OBJEXT) SpeedCalc.$(OBJEXT) \
+       BitfieldMan.$(OBJEXT) BitfieldManFactory.$(OBJEXT) \
+       SimpleRandomizer.$(OBJEXT) FileAllocationMonitor.$(OBJEXT) \
        ConsoleFileAllocationMonitor.$(OBJEXT) \
        ChunkChecksumValidator.$(OBJEXT) HttpResponse.$(OBJEXT) \
        HttpRequest.$(OBJEXT) AbstractProxyRequestCommand.$(OBJEXT) \
@@ -626,10 +625,10 @@ SRCS = Socket.h SocketCore.cc SocketCore.h Command.cc Command.h \
        LogFactory.h NullLogger.h TimeA2.cc TimeA2.h SharedHandle.h \
        HandleRegistry.h FeatureConfig.cc FeatureConfig.h \
        DownloadEngineFactory.cc DownloadEngineFactory.h RequestInfo.h \
-       UrlRequestInfo.cc UrlRequestInfo.h SpeedCalc.cc SpeedCalc.h \
-       PeerStat.h BitfieldMan.cc BitfieldMan.h BitfieldManFactory.cc \
-       BitfieldManFactory.h Randomizer.h SimpleRandomizer.cc \
-       SimpleRandomizer.h FileAllocator.h FileAllocationMonitor.cc \
+       SpeedCalc.cc SpeedCalc.h PeerStat.h BitfieldMan.cc \
+       BitfieldMan.h BitfieldManFactory.cc BitfieldManFactory.h \
+       Randomizer.h SimpleRandomizer.cc SimpleRandomizer.h \
+       FileAllocator.h FileAllocationMonitor.cc \
        FileAllocationMonitor.h ConsoleFileAllocationMonitor.cc \
        ConsoleFileAllocationMonitor.h ChunkChecksumValidator.cc \
        ChunkChecksumValidator.h HttpResponse.cc HttpResponse.h \
@@ -892,7 +891,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TrackerUpdateCommand.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/TrackerWatcherCommand.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UriFileListParser.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/UrlRequestInfo.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Util.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/Xml2MetalinkProcessor.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/main.Po@am__quote@
src/MultiUrlRequestInfo.cc
@@ -78,7 +78,7 @@ RequestInfos MultiUrlRequestInfo::execute() {
 
   e->fillCommand();
 
-  // The number of simultaneous download is specified by PREF_MAX_SIMULTANEOUS_DOWNLOADS.
+  // The number of simultaneous download is specified by PREF_MAX_CONCURRENT_DOWNLOADS.
   // The remaining urls are queued into FillRequestGroupCommand.
   // It observes the number of simultaneous downloads and if it is under
   // the limit, it adds RequestGroup object from its queue to DownloadEngine.
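The comment block above describes the queueing model that max-concurrent-downloads feeds into: request groups beyond the limit wait in a reserved queue and are promoted as running downloads finish. The sketch below is a deliberately simplified, hypothetical illustration of that pattern; the names SimpleGroupMan and fillToLimit are invented for this example and do not exist in aria2, whose RequestGroupMan and FillRequestGroupCommand carry considerably more state.

#include <deque>
#include <iostream>
#include <string>
#include <vector>

struct Group { std::string uri; };

class SimpleGroupMan {
private:
  int max_;
  int active_;
  std::deque<Group> reserved_;
public:
  SimpleGroupMan(int max):max_(max), active_(0) {}
  void addReserved(const Group& g) { reserved_.push_back(g); }
  void downloadFinished() { if(active_ > 0) --active_; }  // a download slot freed up
  // Roughly what FillRequestGroupCommand does on each pass:
  void fillToLimit(std::vector<Group>& started) {
    while(active_ < max_ && !reserved_.empty()) {
      started.push_back(reserved_.front());
      reserved_.pop_front();
      ++active_;
    }
  }
};

int main() {
  SimpleGroupMan man(2);                        // as with -j 2
  for(int i = 0; i < 5; ++i) {
    Group g;
    g.uri = "http://example.org/file";
    man.addReserved(g);
  }
  std::vector<Group> started;
  man.fillToLimit(started);                     // two downloads start
  man.downloadFinished();                       // one finishes, a slot frees up
  man.fillToLimit(started);                     // a third is pulled from the reserved queue
  std::cout << started.size() << std::endl;     // 3
  return 0;
}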
src/OptionHandlerFactory.cc
@@ -91,6 +91,7 @@ OptionHandlers OptionHandlerFactory::createOptionHandlers()
   handlers.push_back(new DefaultOptionHandler(PREF_USER_AGENT));
   handlers.push_back(new BooleanOptionHandler(PREF_NO_NETRC));
   handlers.push_back(new DefaultOptionHandler(PREF_INPUT_FILE));
+  handlers.push_back(new NumberOptionHandler(PREF_MAX_CONCURRENT_DOWNLOADS, 1, 15));
 
   return handlers;
 }
src/TrackerUpdateCommand.cc
@@ -40,6 +40,7 @@
 #include "SleepCommand.h"
 #include "Util.h"
 #include "CUIDCounter.h"
+#include <sstream>
 
 TrackerUpdateCommand::TrackerUpdateCommand(int cuid,
                                            TorrentDownloadEngine* e,
@@ -56,27 +57,19 @@ bool TrackerUpdateCommand::prepareForRetry() {
   return false;
 }
 
-char* TrackerUpdateCommand::getTrackerResponse(size_t& trackerResponseLength) {
-  int maxBufLength = 2048;
-  char* buf = new char[maxBufLength];
-  int bufLength = 0;
+string TrackerUpdateCommand::getTrackerResponse() {
+  stringstream strm;
   char data[2048];
   try {
     while(1) {
-      int dataLength = e->_requestGroupMan->getRequestGroup(0)->getSegmentMan()->diskWriter->readData(data, sizeof(data), bufLength);
-      if(bufLength+dataLength >= maxBufLength) {
-        maxBufLength = Util::expandBuffer(&buf, bufLength, bufLength+dataLength);
-      }
-      memcpy(buf+bufLength, data, dataLength);
-      bufLength += dataLength;
+      int dataLength = e->_requestGroupMan->getRequestGroup(0)->getSegmentMan()->diskWriter->readData(data, sizeof(data), strm.tellp());
+      strm.write(data, dataLength);
       if(dataLength != sizeof(data)) {
         break;
       }
     }
-    trackerResponseLength = bufLength;
-    return buf;
+    return strm.str();
   } catch(RecoverableException* e) {
-    delete [] buf;
     throw;
   }
 }
@@ -89,14 +82,12 @@ bool TrackerUpdateCommand::execute() {
      !e->_requestGroupMan->downloadFinished()) {
     return prepareForRetry();
   }
-  char* trackerResponse = 0;
-  size_t trackerResponseLength = 0;
 
   try {
-    trackerResponse = getTrackerResponse(trackerResponseLength);
+    string trackerResponse = getTrackerResponse();
 
-    btAnnounce->processAnnounceResponse(trackerResponse,
-                                        trackerResponseLength);
+    btAnnounce->processAnnounceResponse(trackerResponse.c_str(),
+                                        trackerResponse.size());
     while(!btRuntime->isHalt() && btRuntime->lessThanMinPeer()) {
       PeerHandle peer = peerStorage->getUnusedPeer();
       if(peer.isNull()) {
@@ -119,9 +110,6 @@ bool TrackerUpdateCommand::execute() {
     e->_requestGroupMan->getRequestGroup(0)->getSegmentMan()->errors++;
     delete err;
   }
-  if(trackerResponse) {
-    delete [] trackerResponse;
-  }
   return prepareForRetry();
 }
 
src/TrackerUpdateCommand.h
@@ -44,7 +44,7 @@ private:
   TorrentDownloadEngine* e;
   const Logger* logger;
   bool prepareForRetry();
-  char* getTrackerResponse(size_t& trackerResponseLength);
+  string getTrackerResponse();
 public:
   TrackerUpdateCommand(int cuid,
                        TorrentDownloadEngine* e,
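In the rewritten getTrackerResponse() above, the running offset passed to readData() is strm.tellp(). That works because ostream::write advances the put pointer, so tellp() always equals the number of bytes collected so far, and the loop ends on the first short read (dataLength != sizeof(data)). A small demonstration of the tellp() behaviour this relies on:

#include <cstring>
#include <iostream>
#include <sstream>

int main() {
  std::stringstream strm;
  char data[8];
  std::memset(data, 'x', sizeof(data));

  std::cout << strm.tellp() << std::endl;   // 0: nothing written yet
  strm.write(data, sizeof(data));
  std::cout << strm.tellp() << std::endl;   // 8: put pointer advanced by write()
  strm.write(data, 3);
  std::cout << strm.tellp() << std::endl;   // 11: equals the total bytes collected
  return 0;
}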
src/UrlRequestInfo.cc
@@ -190,7 +190,7 @@ RequestInfos UrlRequestInfo::execute() {
 
 
 
-  // The number of simultaneous download is specified by PREF_MAX_SIMULTANEOUS_DOWNLOADS.
+  // The number of simultaneous download is specified by PREF_MAX_CONCURRENT_DOWNLOADS.
   // The remaining urls are queued into FillRequestGroupCommand.
   // It observes the number of simultaneous downloads and if it is under
   // the limit, it adds RequestGroup object from its queue to DownloadEngine.
src/main.cc (10 lines changed)
@@ -209,6 +209,8 @@ void showUsage() {
   cout << _(" -i, --input-file=FILE Downloads URIs found in FILE. You can specify\n"
             " multiple URIs for a single entity: deliminate\n"
             " URIs by Tab in a single line.") << endl;
+  cout << _(" -j, --max-concurrent-downloads=N Set maximum number of concurrent downloads.\n"
+            " Default: 5") << endl;
 #ifdef ENABLE_BITTORRENT
   cout << _(" -T, --torrent-file=TORRENT_FILE The file path to .torrent file.") << endl;
   cout << _(" --follow-torrent=true|false Setting this option to false prevents aria2 to\n"
@@ -360,7 +362,7 @@ int main(int argc, char* argv[]) {
   op->put(PREF_CONTINUE, V_FALSE);
   op->put(PREF_USER_AGENT, "aria2");
   op->put(PREF_NO_NETRC, V_FALSE);
-  op->put(PREF_MAX_SIMULTANEOUS_DOWNLOADS, "5");
+  op->put(PREF_MAX_CONCURRENT_DOWNLOADS, "5");
   op->put(PREF_DIRECT_DOWNLOAD_TIMEOUT, "15");
   while(1) {
     int optIndex = 0;
@@ -398,6 +400,7 @@ int main(int argc, char* argv[]) {
       { "user-agent", required_argument, 0, 'U' },
       { "no-netrc", no_argument, 0, 'n' },
       { "input-file", required_argument, 0, 'i' },
+      { "max-concurrent-downloads", required_argument, 0, 'j' },
 #ifdef ENABLE_BITTORRENT
       { "torrent-file", required_argument, NULL, 'T' },
       { "listen-port", required_argument, &lopt, 15 },
@@ -425,7 +428,7 @@ int main(int argc, char* argv[]) {
       { "help", no_argument, NULL, 'h' },
       { 0, 0, 0, 0 }
     };
-    c = getopt_long(argc, argv, "Dd:o:l:s:pt:m:vhST:M:C:a:cU:ni:", longOpts, &optIndex);
+    c = getopt_long(argc, argv, "Dd:o:l:s:pt:m:vhST:M:C:a:cU:ni:j:", longOpts, &optIndex);
     if(c == -1) {
       break;
     }
@@ -582,6 +585,9 @@ int main(int argc, char* argv[]) {
     case 'i':
      cmdstream << PREF_INPUT_FILE << "=" << optarg << "\n";
      break;
+    case 'j':
+      cmdstream << PREF_MAX_CONCURRENT_DOWNLOADS << "=" << optarg << "\n";
+      break;
     case 'v':
       showVersion();
       exit(EXIT_SUCCESS);
src/prefs.h
@@ -101,7 +101,7 @@
 // value: string that your file system recognizes as a file name.
 #define PREF_INPUT_FILE "input-file"
 // value: 1*digit
-#define PREF_MAX_SIMULTANEOUS_DOWNLOADS "max-simultaneous-downloads"
+#define PREF_MAX_CONCURRENT_DOWNLOADS "max-concurrent-downloads"
 // value: 1*digit
 #define PREF_DIRECT_DOWNLOAD_TIMEOUT "direct-download-timeout"
 
test/ByteArrayDiskWriterTest.cc (new file)
@@ -0,0 +1,66 @@
+#include "ByteArrayDiskWriter.h"
+#include <string>
+#include <cppunit/extensions/HelperMacros.h>
+
+using namespace std;
+
+class ByteArrayDiskWriterTest:public CppUnit::TestFixture {
+
+  CPPUNIT_TEST_SUITE(ByteArrayDiskWriterTest);
+  CPPUNIT_TEST(testWriteAndRead);
+  CPPUNIT_TEST(testWriteAndRead2);
+  CPPUNIT_TEST_SUITE_END();
+private:
+
+public:
+  void setUp() {
+  }
+
+  void testWriteAndRead();
+  void testWriteAndRead2();
+};
+
+
+CPPUNIT_TEST_SUITE_REGISTRATION( ByteArrayDiskWriterTest );
+
+void ByteArrayDiskWriterTest::testWriteAndRead() {
+  ByteArrayDiskWriter bw;
+
+  string msg1 = "Hello world!";
+  bw.writeData(msg1.c_str(), msg1.size(), 0);
+
+  char buf[100];
+  int32_t c = bw.readData(buf, sizeof(buf), 0);
+  buf[c] = '\0';
+
+  CPPUNIT_ASSERT_EQUAL(msg1, string(buf));
+
+  // second call
+  memset(buf, '\0', sizeof(buf));
+
+  c = bw.readData(buf, sizeof(buf), 0);
+  buf[c] = '\0';
+
+  CPPUNIT_ASSERT_EQUAL(msg1, string(buf));
+}
+
+void ByteArrayDiskWriterTest::testWriteAndRead2() {
+  ByteArrayDiskWriter bw;
+
+  string msg1 = "Hello world!";
+  bw.writeData(msg1.c_str(), msg1.size(), 16);
+
+  char buf[100];
+  int32_t c = bw.readData(buf, sizeof(buf), 16);
+  buf[c] = '\0';
+
+  CPPUNIT_ASSERT_EQUAL(msg1, string(buf));
+
+  // second call
+  memset(buf, '\0', sizeof(buf));
+
+  c = bw.readData(buf, sizeof(buf), 16);
+  buf[c] = '\0';
+
+  CPPUNIT_ASSERT_EQUAL(msg1, string(buf));
+}
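testWriteAndRead2 writes at offset 16 into an empty writer, so it exercises the gap-filling branch of the new writeData(): the buffer should end up as 16 NUL bytes followed by the 12-byte message, and readData(..., 16) should return exactly that message. The expected layout can be checked with a plain std::stringstream (assuming the same pad-then-write behaviour; this snippet is not part of the committed test):

#include <iostream>
#include <sstream>
#include <string>

int main() {
  std::stringstream buf;
  std::string msg = "Hello world!";

  for(int i = 0; i < 16; ++i) {                      // pad the gap, as writeData() does for position 16
    buf.put('\0');
  }
  buf.write(msg.c_str(), msg.size());

  std::cout << buf.str().size() << std::endl;        // 28 = 16 + 12, what size() reports
  std::cout << buf.str().substr(16) << std::endl;    // "Hello world!", what readData(.., 16) yields
  return 0;
}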
test/Makefile.am
@@ -1,6 +1,7 @@
 TESTS = aria2c
 check_PROGRAMS = $(TESTS)
 aria2c_SOURCES = AllTest.cc\
+       ByteArrayDiskWriterTest.cc\
        RequestGroupManTest.cc\
        IteratableChecksumValidatorTest.cc\
        IteratableChunkChecksumValidatorTest.cc\
test/Makefile.in
@@ -57,7 +57,9 @@ mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
 CONFIG_HEADER = $(top_builddir)/config.h
 CONFIG_CLEAN_FILES =
 am__EXEEXT_1 = aria2c$(EXEEXT)
-am_aria2c_OBJECTS = AllTest.$(OBJEXT) RequestGroupManTest.$(OBJEXT) \
+am_aria2c_OBJECTS = AllTest.$(OBJEXT) \
+       ByteArrayDiskWriterTest.$(OBJEXT) \
+       RequestGroupManTest.$(OBJEXT) \
        IteratableChecksumValidatorTest.$(OBJEXT) \
        IteratableChunkChecksumValidatorTest.$(OBJEXT) \
        UriFileListParserTest.$(OBJEXT) PeerTest.$(OBJEXT) \
@@ -265,6 +267,7 @@ sysconfdir = @sysconfdir@
 target_alias = @target_alias@
 TESTS = aria2c
 aria2c_SOURCES = AllTest.cc\
+       ByteArrayDiskWriterTest.cc\
        RequestGroupManTest.cc\
        IteratableChecksumValidatorTest.cc\
        IteratableChunkChecksumValidatorTest.cc\
@@ -423,6 +426,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BtRequestMessageTest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BtSuggestPieceMessageTest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/BtUnchokeMessageTest.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ByteArrayDiskWriterTest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ChunkChecksumValidatorTest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ChunkedEncodingTest.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ConsoleFileAllocationMonitorTest.Po@am__quote@