diff --git a/setup.py b/setup.py
index 44e6417eb..57328d28a 100644
--- a/setup.py
+++ b/setup.py
@@ -86,6 +86,16 @@ def fetch_requirements(path):
         return [r.strip() for r in fd.readlines()]
 
 
+def fetch_readme():
+    with open('README.md', encoding='utf-8') as f:
+        return f.read()
+
+
+def get_version():
+    with open('version.txt') as f:
+        return f.read().strip()
+
+
 if build_cuda_ext:
     try:
         import torch
@@ -177,7 +187,7 @@ if build_cuda_ext:
 
 setup(
     name='colossalai',
-    version='0.0.2',
+    version=get_version(),
     packages=find_packages(exclude=('benchmark',
                                     'docker',
                                     'tests',
@@ -188,10 +198,28 @@ setup(
                                     'requirements',
                                     '*.egg-info',)),
     description='An integrated large-scale model training system with efficient parallelization techniques',
+    long_description=fetch_readme(),
+    long_description_content_type='text/markdown',
+    url='https://www.colossalai.org',
+    project_urls={
+        'Forum': 'https://github.com/hpcaitech/ColossalAI/discussions',
+        'Bug Tracker': 'https://github.com/hpcaitech/ColossalAI/issues',
+        'Examples': 'https://github.com/hpcaitech/ColossalAI-Examples',
+        'Documentation': 'http://colossalai.readthedocs.io',
+        'Github': 'https://github.com/hpcaitech/ColossalAI',
+    },
     ext_modules=ext_modules,
     cmdclass={'build_ext': BuildExtension} if ext_modules else {},
     install_requires=fetch_requirements('requirements/requirements.txt'),
     extras_require={
         'zero': fetch_requirements('requirements/requirements-zero.txt'),
-    }
+    },
+    python_requires='>=3.7',
+    classifiers=[
+        'Programming Language :: Python :: 3',
+        'License :: OSI Approved :: Apache Software License',
+        'Environment :: GPU :: NVIDIA CUDA',
+        'Topic :: Scientific/Engineering :: Artificial Intelligence',
+        'Topic :: System :: Distributed Computing',
+    ],
 )
diff --git a/version.txt b/version.txt
new file mode 100644
index 000000000..4e379d2bf
--- /dev/null
+++ b/version.txt
@@ -0,0 +1 @@
+0.0.2
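
For context only, not part of the patch: a minimal sketch of how to confirm that the version now read from version.txt is what the installed distribution reports. It assumes the package has been installed from this tree (e.g. pip install .) and Python >= 3.8 for the stdlib importlib.metadata module.

    # Hypothetical verification script, not included in this patch.
    from importlib.metadata import version  # stdlib since Python 3.8

    # setup() above passes get_version(), i.e. the stripped contents of
    # version.txt, so the installed metadata should report '0.0.2'.
    print(version('colossalai'))

Note that get_version() and fetch_readme() open version.txt and README.md with relative paths, so setup.py is expected to be invoked from the repository root, which is the usual case for pip and python setup.py builds.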