From 54f85a6e9aff6eea7f5fdce647e887678d5ce44c Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Thu, 24 Aug 2023 22:03:04 +0800 Subject: [PATCH 01/34] Merge develop to main (#233) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(utils/writer.py): support tensorboard writer (#63) * feat(utils/writer.py): support tensorboard writer * feat(utils/writer.py): add class comment --------- Co-authored-by: 黄婷 * [Develop] Pull Main Branch (#121) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. 
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) --------- Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> * feat(core/scheduler): support pipeline parallel (#98) * feat(utils/writer.py): support tensorboard writer * feat(utils/writer.py): add class comment * feat(core): support pipeline parallel * fix(core): fix demo running error * feat(solver/optimizer): add pp zero optimizer * fix(solver/optimizer): fix word spelling error * feat(core/scheduler): add new dir scheduler in core/ * fix(core): fix ci lint error * feat(solver/optimizer): merge pp and nopp optimizer * doc(usage.md): update usage doc * feat(core/scheduler): support post func * feat(core/scheduler): add dtype para in pp sche and update func get_tensor_shape * feat(core/scheduler): add _load_micro_batch in base scheduler * feat(core/scheduler): support optimizer overlap communication in pp scheduler * feat(core/scheduler): delete data process func code * feat(core/trainer): schedule pre processing for all schedule --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * refactor(rotaryEmbedding): refactor forward (#120) * use fp16 in instruction (#80) * delete torch_dtype of README's example code (#100) * refactor the forward for rotary embedding --------- Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> * feat(model/metrics.py): support calculating accuracy and perplexity m… (#91) * feat(model/metrics.py): support calculating accuracy and perplexity metrics * fix(model/metrics.py): fix import error * feat(train.py): minor update --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * fix(optimizer/util.py) change inf defination * [Dev] Pull Main (#139) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more 
options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. * [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) * docs(install.md): update dependency package transformers version to >= 4.28.0 (#124) Co-authored-by: 黄婷 * docs(LICENSE): add license (#125) * add license of colossalai and flash-attn * fix lint * modify the name * fix AutoModel map in convert2hf.py (#116) * variables are not printly as expect (#114) * feat(solver): fix code to adapt to torch2.0 and provide docker images (#128) * feat(solver): fix code to adapt to torch2.0 * docs(install.md): publish internlm environment image * docs(install.md): update dependency packages version * docs(install.md): update default image --------- Co-authored-by: 黄婷 * add demo test (#132) Co-authored-by: qa-caif-cicd * fix web_demo cache accelerate (#133) * fix(hybrid_zero_optim.py): delete math import * Update embedding.py --------- Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: huangting4201 <1538303371@qq.com> Co-authored-by: 黄婷 Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> * style(solver/optimizer/utils.py): fix lint error (#147) Co-authored-by: huangting.p * feat(*): support not-flash-attn for pp and no-pp (#145) * support not flash attention for no-pp * support pipeline * modify the config * refactor the code * refactor the code * remove some unnecessary code * fix(initialize/launch.py): set default value for use_flash_attn (#158) * add default for use_flash_attn * fix lint * feat(utils/logger.py): support uniscale logger (#152) * style(internlm): fix lint error * feat(utils/logger.py): support uniscale logger * fix(utils/logger.py): fix import circular error * feat(train.py): support dashboard metric panel and fix ci train config * fix(ci_scripts/train/slurm_train.sh): fix ci train error * fix(ci_scripts/train/torchrun.sh): fix ci train error * fix(ci_scripts/train): restore ci update * fix(config.json): delete alert webhook * feat(train.py): optimize func init logger * feat(config.json): delete config.json --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * feat(utils/evaluation.py): support evaluate (#154) * style(internlm): fix lint error * feat(utils/logger.py): support uniscale logger * fix(utils/logger.py): fix import circular error * feat(train.py): support dashboard metric panel and fix ci train config * 
fix(ci_scripts/train/slurm_train.sh): fix ci train error * fix(ci_scripts/train/torchrun.sh): fix ci train error * feat(utils/evaluation.py): support evaluate on validation dataset * fix(utils/evaluation.py): fix demo error * fix(ci_scripts/train/ci_7B_sft.py): fix ci train error * feat(initialize/launch.py): set default value for valid_bsz and valid_every * fix(ci_scripts/train): restore ci update * docs(configs/7B_sft.py): update comment for config * fix(config.json): delete config.json * fix evaluation bug in scheduler when use_flash_attn=False * feat(scheduler/no_pipeline_scheduler.py): support micro_bsz>1 in no pp * modify the jugement in pp and no-pp scheduler * modify the data_process_func in evaluation * fix bugs when use_flash_attn=False * rename symbol * feat(configs/7B_sft.py): change para valid_bsz to valid_micro_num * feat(scheduler/no_pipeline_scheduler.py): update para set _grad_accum_batch_size --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p Co-authored-by: yingtongxiong <974106207@qq.com> * feat(*): support no apex (#166) * support no-apex * add default for use_apex * fix lint * modify the RMSNormTorch * remove some comments * remove use_apex parameter * remove some unnecessary code * refactor(*): refactor the code with no-apex (#170) * support no-apex * add default for use_apex * fix lint * modify the RMSNormTorch * remove some comments * remove use_apex parameter * remove some unnecessary code * optimize the code including import * remove the import RMSNorm * remove warnings * refactor(scheduler): rewrite pipeline scheduler (#138) * refactor(scheduler): rewrite pipeline scheduler * fix(*): fix pipeline scheduler bugs * fix(*): fix merge bug * feat(*): update codes with todo tag * feat(*): add comments * feat(internlm/core/scheduler): update recv_prev/next logic * feat(utils/evaluation.py): update sche metric hook for valid --------- Co-authored-by: huangting.p * feat(*): support fp32 training (#155) * support float32 training * fix lint * add adaptation in model/utils.py * remove some unnecessary code * fix lint * feat(optim): add support for fp32 zero * Revert "Merge pull request #2 from SolenoidWGT/fp32_zero" This reverts commit 53fc50b0e52f12466e8dc8ec14c5e22b217537c8, reversing changes made to 40f24d0a73fff5c083e11c18d4a07ad16aaabab3. 
revert commit * merge develop * Update utils.py * support fp32 in zero optimizer * modify the dtype --------- Co-authored-by: wangguoteng.p * feat(*): support sequence_parallel (#180) * support sequence_parallel for no pipeline * sequence_parallel does not support no-flash-attn * support sequence parallel for pipeline * add memory profiler * Update 13B.py * add memory profiler * fix evaluation bug * remove some unnecessary code * remove some unnecessary code * Update parallel_context.py * modify the config * remove memory profiler * modify the config * support selective dropout * feat(monitor): support monitor and alert (#175) * feat(monitor): support monitor and alert * feat(monitor.py): fix demo error * feat(monitor.py): move cmd monitor args to config file * feat(hybrid_zero_optim.py): if overflow occurs send alert msg * feat(monitor.py): remove alert msg filter * feat(monitor.py): optimize class MonitorTracker * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(train.py): update print to log * style(ci): fix lint error * fix(utils/evaluation.py): remove useless code * fix(model/modeling_internlm.py): fix lint error --------- Co-authored-by: huangting4201 * feat(ckpt): add async upload and ckpt snapshot (#161) * use fp16 in instruction (#80) * delete torch_dtype of README's example code (#100) * feat(ckpt): support async ckpt upload and ckpt snapshot --------- Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: wangguoteng.p * feat(ckpt): add auto ckpt load and singal quit (#189) Co-authored-by: wangguoteng.p * Revert "feat(ckpt): add auto ckpt load and singal quit (#189)" (#192) This reverts commit a45a91bb843cf0b10b8b014a6ef35e695871f91b. 
* refactor(solver/optimizer): improve optimizer memory (#193) * refactor(solver/optimizer): improve optimizer memory * feat(data): remove useless dataset type ids map * Feat/optimizer (#194) * feat(optimier.py): reduce memory footprint and avoid _check_overflow call * feat(optimier.py): reduce memory footprint and avoid _check_overflow call * feat(optimizer.py): overlap compute norm with allreduce * update var and function name * update function compute norm (#197) Co-authored-by: ChenQiaoling00 * feat(optimizer/hybrid_zero_optim.py): overlap gradients last bucket allreduce and compute norm (#196) * support gradients allreduce and compute norm overlap * fix para set error * remove timer cal_norm for testing * feat(optimizer/hybrid_zero_optim.py): support group global norm * format(lint): fix lint error * feat(optimizer/store.py): update code based on comment --------- Co-authored-by: ChenQiaoling00 Co-authored-by: huangting4201 <1538303371@qq.com> * fix(ci): fix ci train error (#199) * fix/ci train error (#200) * fix(ci): fix ci train error * fix(ci): fix ci train error * fix(ci): fix ci train error * fix(train.py): fix scheduler metric hook skip error (#204) * Merge main to develop (#203) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. 
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) * docs(install.md): update dependency package transformers version to >= 4.28.0 (#124) Co-authored-by: 黄婷 * docs(LICENSE): add license (#125) * add license of colossalai and flash-attn * fix lint * modify the name * fix AutoModel map in convert2hf.py (#116) * variables are not printly as expect (#114) * feat(solver): fix code to adapt to torch2.0 and provide docker images (#128) * feat(solver): fix code to adapt to torch2.0 * docs(install.md): publish internlm environment image * docs(install.md): update dependency packages version * docs(install.md): update default image --------- Co-authored-by: 黄婷 * add demo test (#132) Co-authored-by: qa-caif-cicd * fix web_demo cache accelerate (#133) * Doc: add twitter link (#141) * Feat add checkpoint fraction (#151) * feat(config): add checkpoint_fraction into config * feat: remove checkpoint_fraction from configs/7B_sft.py --------- Co-authored-by: wangguoteng.p * [Doc] update deployment guide to keep consistency with lmdeploy (#136) * update deployment guide * fix error * use llm partition (#159) Co-authored-by: qa-caif-cicd * test(ci_scripts): clean test data after test, remove unnecessary global variables, and other optimizations (#165) * test: optimization of ci scripts(variables, test data cleaning, etc). * chore(workflows): disable ci job on push. * fix: update partition * test(ci_scripts): add install requirements automaticlly,trigger event about lint check and other optimizations (#174) * add pull_request in lint check * use default variables in ci_scripts * fix format * check and install requirements automaticlly * fix format --------- Co-authored-by: qa-caif-cicd * feat(profiling): add a simple memory profiler (#89) * feat(profiling): add simple memory profiler * feat(profiling): add profiling argument * feat(CI_workflow): Add PR & Issue auto remove workflow (#184) * feat(ci_workflow): Add PR & Issue auto remove workflow Add a workflow for stale PR & Issue auto remove - pr & issue well be labeled as stale for inactive in 7 days - staled PR & Issue well be remove in 7 days - run this workflow every day on 1:30 a.m. 
* Update stale.yml * feat(bot): Create .owners.yml for Auto Assign (#176) * Create .owners.yml: for issue/pr assign automatically * Update .owners.yml * Update .owners.yml fix typo * [feat]: add pal reasoning script (#163) * [Feat] Add PAL inference script * Update README.md * Update tools/README.md Co-authored-by: BigDong * Update tools/pal_inference.py Co-authored-by: BigDong * Update pal script * Update README.md * restore .ore-commit-config.yaml * Update tools/README.md Co-authored-by: BigDong * Update tools/README.md Co-authored-by: BigDong * Update pal inference script * Update READMD.md * Update internlm/utils/interface.py Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> * Update pal script * Update pal script * Update script * Add docstring * Update format * Update script * Update script * Update script --------- Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> * test(ci_scripts): add timeout settings and clean work after the slurm job (#185) * restore pr test on develop branch * add mask * add post action to cancel slurm job * remove readonly attribute on job log * add debug info * debug job log * try stdin * use stdin * set default value avoid error * try setting readonly on job log * performance echo * remove debug info * use squeue to check slurm job status * restore the lossed parm * litmit retry times * use exclusive to avoid port already in use * optimize loop body * remove partition * add {} for variables * set env variable for slurm partition --------- Co-authored-by: qa-caif-cicd * refactor(tools): move interface.py and import it to web_demo (#195) * move interface.py and import it to web_demo * typo * fix(ci): fix lint error * fix(ci): fix lint error --------- Co-authored-by: Sun Peng Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: 黄婷 Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Co-authored-by: wangguoteng.p Co-authored-by: lvhan028 Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com> Co-authored-by: cx <759046501@qq.com> Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com> Co-authored-by: del-zhenwu Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com> Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> Co-authored-by: huangting4201 * fix(pipeline_scheduler.py): fix tensor shape err and comm block (#210) * feat(train.py): support torch profiler (#201) * feat(train.py): support torch profiling * feat(train.py): optimize initialize_llm_profile * feat(train.py): profiling with 
tp0 and dp0 * move sequence parallel context manager to evalation func * fix lint * move the process for type_ids to load_new_batch * fix lint --------- Co-authored-by: yingtongxiong <974106207@qq.com> * feat(ckpt): add auto ckpt load and singal quit (#216) Co-authored-by: wangguoteng.p * feat(memory_profiler): improve memory profiler (#217) * Feat/overlap_bcast_forward (#218) * feat/support bcast forward overlao * feat/optimize the bcast call * feat/optimize the bcast call * feat/optimize the bcast call * fix lint * fix lint * fix lint * fix lint * add torch.cuda.synchronize in save_checkpoint --------- Co-authored-by: sunpeng * fix(*): move sequence_parallel to parallel config (#224) * move sequence_parallel to parallel config * set the sequece_parallel default value is False * fix lint * fix lint * fix lint * Feat/example training internlm (#212) * feat(train/training_internlm.py): move common init funcs to internlm/train * feat(train/training_internlm.py): update some public funcs * feat(train/training_internlm.py): update some public funcs * feat(evaluation.py): adapt evaluate to streaming dataset * feat(train/training_internlm.py): minor update based on comments * fix(training_internlm.py): set train dataloader persistent_workers true only when num_worker>0 * fix(training_internlm.py): fix demo error * feat(data/utils.py): add new dataset type code for streaming dataset (#225) * test(model): support fp32 with flash_attn (#223) * support tf32 with flash * move autocast to attention * fix lint * fix lint * fix lint * fix lint * fix some bugs in model * modify the convert dtype * fix(pipeline): modify the sequence_parallel in pipeline (#227) * move sequence_parallel to parallel config * set the sequece_parallel default value is False * fix lint * fix lint * fix lint * modify the sequence_parallel in pp * feat(init): add skip args check flag and add zero overlap flag (#222) * feat(init): add skip args check flag * fix(optim): add param overlap enable flag * fix(ci): fix train error (#228) Co-authored-by: huangting4201 * fix(writer): fix tensorboard resume bug (#229) * fix(train.py): fix overflow grad norm error (#230) * feat(ckpt): add train config into ckpt (#231) --------- Co-authored-by: 黄婷 Co-authored-by: Sun Peng Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: huangting.p Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> Co-authored-by: yingtongxiong <974106207@qq.com> Co-authored-by: cx <759046501@qq.com> Co-authored-by: wangguoteng.p Co-authored-by: huangting4201 Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Co-authored-by: lvhan028 Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com> 
Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com> Co-authored-by: del-zhenwu Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com> Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> --- .github/workflows/demo_in_readme.yaml | 3 +- .gitignore | 4 +- .pre-commit-config.yaml | 2 +- ci_scripts/train/ci_7B_sft.py | 1 + ci_scripts/train/load_ckpt.sh | 2 +- ci_scripts/train/slurm_train.sh | 2 +- ci_scripts/train/torchrun.sh | 2 +- configs/7B_sft.py | 50 +- doc/en/usage.md | 2 +- doc/usage.md | 2 +- internlm/core/communication/__init__.py | 32 + internlm/core/communication/p2p.py | 582 ++++++++ internlm/core/communication/utils.py | 125 ++ internlm/core/context/parallel_context.py | 1 - internlm/core/naive_amp.py | 18 +- internlm/core/no_pipeline_scheduler.py | 279 ---- internlm/core/scheduler/__init__.py | 12 + internlm/core/scheduler/base_scheduler.py | 187 +++ .../core/scheduler/no_pipeline_scheduler.py | 192 +++ internlm/core/scheduler/pipeline_scheduler.py | 1293 +++++++++++++++++ internlm/core/trainer.py | 21 +- internlm/data/batch_sampler.py | 5 - internlm/data/dataset.py | 56 + internlm/data/packed_dataset.py | 49 +- internlm/data/utils.py | 33 +- internlm/initialize/__init__.py | 8 +- internlm/initialize/initialize_trainer.py | 59 +- internlm/initialize/launch.py | 210 ++- internlm/model/__init__.py | 2 + internlm/model/embedding.py | 19 +- internlm/model/linear.py | 69 +- internlm/model/metrics.py | 263 ++++ internlm/model/modeling_internlm.py | 25 +- internlm/model/multi_head_attention.py | 48 +- internlm/model/norm.py | 46 + internlm/model/utils.py | 136 ++ internlm/monitor/__init__.py | 4 + internlm/monitor/alert.py | 53 + internlm/monitor/monitor.py | 226 +++ internlm/monitor/utils.py | 32 + .../solver/optimizer/hybrid_zero_optim.py | 397 +++-- internlm/solver/optimizer/store.py | 38 + internlm/solver/optimizer/utils.py | 270 +++- internlm/train/__init__.py | 19 + internlm/train/training_internlm.py | 414 ++++++ internlm/utils/common.py | 50 +- internlm/utils/evaluation.py | 168 +++ internlm/utils/logger.py | 57 + internlm/utils/megatron_timers.py | 5 +- internlm/utils/model_checkpoint.py | 424 +++++- internlm/utils/parallel.py | 13 + internlm/utils/registry.py | 4 +- internlm/utils/simple_memory_profiler.py | 226 +-- internlm/utils/storage_manager.py | 389 ++++- internlm/utils/writer.py | 142 ++ tools/README.md | 1 - tools/README_EN.md | 1 - train.py | 586 +++----- web_demo.py | 64 +- 59 files changed, 6108 insertions(+), 1315 deletions(-) create mode 100644 internlm/core/communication/__init__.py create mode 100644 internlm/core/communication/p2p.py create mode 100644 internlm/core/communication/utils.py delete mode 100644 internlm/core/no_pipeline_scheduler.py create mode 100644 internlm/core/scheduler/__init__.py create mode 100644 internlm/core/scheduler/base_scheduler.py create mode 100644 internlm/core/scheduler/no_pipeline_scheduler.py create mode 100644 internlm/core/scheduler/pipeline_scheduler.py create mode 100644 internlm/data/dataset.py create mode 100644 internlm/model/metrics.py create mode 100644 internlm/model/norm.py create mode 100644 internlm/monitor/__init__.py create mode 100644 internlm/monitor/alert.py create mode 100644 internlm/monitor/monitor.py create mode 100644 internlm/monitor/utils.py create mode 100644 internlm/train/__init__.py create mode 100644 internlm/train/training_internlm.py create mode 100644 internlm/utils/evaluation.py create mode 100644 
internlm/utils/writer.py diff --git a/.github/workflows/demo_in_readme.yaml b/.github/workflows/demo_in_readme.yaml index 59fcf38..7a330ed 100644 --- a/.github/workflows/demo_in_readme.yaml +++ b/.github/workflows/demo_in_readme.yaml @@ -1,5 +1,5 @@ name: demo-in-readme -on: +on: pull_request: branches: - "main" @@ -110,7 +110,6 @@ jobs: srun -p ${SLURM_PARTITION} --job-name=${GITHUB_RUN_ID}-${GITHUB_JOB} --gpus-per-task=2 python ../ci_scripts/model/loaded_as_transformer.py cd .. rm -rf $GITHUB_WORKSPACE/hf_ckpt - load-chat-model-in-hf: if: ${{ always() }} needs: check-requirements diff --git a/.gitignore b/.gitignore index 8565108..8992a0f 100644 --- a/.gitignore +++ b/.gitignore @@ -115,6 +115,7 @@ venv.bak/ *.pkl *.pkl.json *.log.json +*.trace.json docs/modelzoo_statistics.md mmdet/.mim work_dirs/ @@ -142,4 +143,5 @@ core.* # Run llm_ckpts -memory_trace \ No newline at end of file +events.* +memory_trace diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 60bf4a0..19cd7c8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -49,5 +49,5 @@ repos: args: [ '--rcfile=.pylintrc', - '--disable=C0114,C0415,W0212,W0235,W0238,W0621,C0103,R1735,C2801,E0402,C0412,W0719,R1728,W1514,W0718,W0105,W0707,C0209,W0703' + '--disable=C0114,C0415,W0212,W0235,W0238,W0621,C0103,R1735,C2801,E0402,C0412,W0719,R1728,W1514,W0718,W0105,W0707,C0209,W0703,W1203' ] \ No newline at end of file diff --git a/ci_scripts/train/ci_7B_sft.py b/ci_scripts/train/ci_7B_sft.py index bfb7e3b..fea45e1 100644 --- a/ci_scripts/train/ci_7B_sft.py +++ b/ci_scripts/train/ci_7B_sft.py @@ -15,6 +15,7 @@ VOCAB_SIZE = 103168 SAVE_CKPT_FOLDER = "local:llm_ckpts" # LOAD_CKPT_FOLDER = "local:llm_ckpts/49" ckpt = dict( + enable_save_ckpt=True, # Path to save training ckpt. save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to continue training ckpt (load model weights and scheduler/context states). 
diff --git a/ci_scripts/train/load_ckpt.sh b/ci_scripts/train/load_ckpt.sh index c68dee0..413dba4 100644 --- a/ci_scripts/train/load_ckpt.sh +++ b/ci_scripts/train/load_ckpt.sh @@ -5,7 +5,7 @@ set -x readonly CKPTS_PATH="$GITHUB_WORKSPACE/llm_ckpts" readonly CKPTS40_PATH="$GITHUB_WORKSPACE/llm_ckpts/40" readonly CKPTS40_OUTPUT="${CKPTS40_PATH}/*.pt" -expected_num=21 +expected_num=22 exit_code=0 source ./ci_scripts/common/basic_func.sh diff --git a/ci_scripts/train/slurm_train.sh b/ci_scripts/train/slurm_train.sh index 2f32111..19d7c9b 100644 --- a/ci_scripts/train/slurm_train.sh +++ b/ci_scripts/train/slurm_train.sh @@ -5,7 +5,7 @@ set -x readonly CKPTS_PATH="$GITHUB_WORKSPACE/llm_ckpts" readonly CKPTS20_PATH="$GITHUB_WORKSPACE/llm_ckpts/20" readonly CKPTS20_OUTPUT="${CKPTS20_PATH}/*.pt" -expected_num=21 +expected_num=22 exit_code=0 source ./ci_scripts/common/basic_func.sh diff --git a/ci_scripts/train/torchrun.sh b/ci_scripts/train/torchrun.sh index 1f08af3..8870761 100644 --- a/ci_scripts/train/torchrun.sh +++ b/ci_scripts/train/torchrun.sh @@ -5,7 +5,7 @@ set -x readonly CKPTS_PATH="$GITHUB_WORKSPACE/llm_ckpts" readonly CKPTS20_PATH="$GITHUB_WORKSPACE/llm_ckpts/20" readonly CKPTS_OUTPUT="${CKPTS20_PATH}/*.pt" -expected_num=21 +expected_num=22 exit_code=0 source ./ci_scripts/common/basic_func.sh diff --git a/configs/7B_sft.py b/configs/7B_sft.py index 17e52fc..1f1993f 100644 --- a/configs/7B_sft.py +++ b/configs/7B_sft.py @@ -7,31 +7,43 @@ MLP_RATIO = 8 / 3 NUM_LAYER = 32 VOCAB_SIZE = 103168 +MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx" # Ckpt folder format: # fs: 'local:/mnt/nfs/XXX' -# oss: 'boto3:s3://model_weights/XXX' -MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx" SAVE_CKPT_FOLDER = "local:llm_ckpts" LOAD_CKPT_FOLDER = "local:llm_ckpts/49" + +# boto3 Ckpt folder format: +# import os +# BOTO3_IP = os.environ["BOTO3_IP"] # boto3 bucket endpoint +# SAVE_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm" +# LOAD_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm/snapshot/1/" +CHECKPOINT_EVERY = 50 ckpt = dict( - # Path to save training ckpt. - save_ckpt_folder=SAVE_CKPT_FOLDER, - # Path to continue training ckpt (load model weights and scheduler/context states). - # load_ckpt_folder=LOAD_CKPT_FOLDER, - # Path to initialize with given model weights. - # load_model_only_folder=MODEL_ONLY_FOLDER, - checkpoint_every=50, - # Wheter to load optimizer states when continuing training. - load_optimizer=True, + enable_save_ckpt=False, # enable ckpt save. + save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to save training ckpt. + # load_ckpt_folder=LOAD_CKPT_FOLDER, # Ckpt path to resume training (load weights and scheduler/context states). + # load_model_only_folder=MODEL_ONLY_FOLDER, # Path to initialize with given model weights. + load_optimizer=True, # Whether to load optimizer states when continuing training. + checkpoint_every=CHECKPOINT_EVERY, + async_upload=True, # async ckpt upload. (only works for boto3 ckpt) + async_upload_tmp_folder="/dev/shm/internlm_tmp_ckpt/", # path for temporary files during asynchronous upload. + snapshot_ckpt_folder="/".join([SAVE_CKPT_FOLDER, "snapshot"]), # directory for snapshot ckpt storage. + oss_snapshot_freq=int(CHECKPOINT_EVERY / 2), # snapshot ckpt save frequency. 
) TRAIN_FOLDER = "/path/to/dataset" +VALID_FOLDER = "/path/to/dataset" data = dict( seq_len=SEQ_LEN, # micro_num means the number of micro_batch contained in one gradient update micro_num=4, # packed_length = micro_bsz * SEQ_LEN micro_bsz=2, + # defaults to the value of micro_num + valid_micro_num=4, + # defaults to 0, means disable evaluate + valid_every=50, pack_sample_into_one=False, total_steps=50000, skip_batches="", @@ -39,6 +51,7 @@ data = dict( # Datasets with less than 50 rows will be discarded min_length=50, # train_folder=TRAIN_FOLDER, + # valid_folder=VALID_FOLDER, ) grad_scaler = dict( @@ -62,7 +75,8 @@ grad_scaler = dict( hybrid_zero_optimizer = dict( # Enable low_level_optimzer overlap_communication - zero_overlap_communication=True, + overlap_sync_grad=True, + overlap_sync_param=True, # bucket size for nccl communication params reduce_bucket_size=512 * 1024 * 1024, # grad clipping @@ -107,9 +121,11 @@ model = dict( num_layers=NUM_LAYER, mlp_ratio=MLP_RATIO, apply_post_layer_norm=False, - dtype="torch.bfloat16", + dtype="torch.float16", # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32" norm_type="rmsnorm", layer_norm_epsilon=1e-5, + use_flash_attn=True, + num_chunks=1, # if num_chunks > 1, interleaved pipeline scheduler is used. ) """ zero1 parallel: @@ -118,11 +134,15 @@ zero1 parallel: 2. if zero1 == 1, zero is not used, and all dp groups retain the full amount of model parameters. 3. zero1 > 1 and zero1 <= dp world size, the world size of zero is a subset of dp world size. For smaller models, it is usually a better choice to split the parameters within nodes with a setting <= 8. -pipeline parallel: pipeline parallel size, only 1 is accepted currently. -tensor parallel: tensor parallel size, usually the number of GPUs per node, only 1 is accepted currently. +pipeline parallel (dict): + 1. size: int, the size of pipeline parallel. + 2. interleaved_overlap: bool, enable/disable communication overlap when using interleaved pipeline scheduler. +tensor parallel: tensor parallel size, usually the number of GPUs per node. """ parallel = dict( zero1=8, + pipeline=dict(size=1, interleaved_overlap=True), + sequence_parallel=False, ) cudnn_deterministic = False diff --git a/doc/en/usage.md b/doc/en/usage.md index b533bdc..0f62ebc 100644 --- a/doc/en/usage.md +++ b/doc/en/usage.md @@ -174,7 +174,7 @@ parallel = dict( - When `size <= 0`, the size of the zero1 process group is equal to the size of the data parallel process group, so the optimizer state parameters will be split within the data parallel range. - When `size == 1`, zero1 is not used, and all data parallel groups retain the complete optimizer state parameters. - When `size > 1` and `size <= data_parallel_world_size`, the zero1 process group is a subset of the data parallel process group. 
-- pipeline: pipeline parallel size, currently only supports 1, default value is 1 +- pipeline: pipeline parallel size, default value is 1 - tensor: tensor parallel size, usually the number of GPUs per node, default value is 1 Note: `Data parallel size = Total number of GPUs / Pipeline parallel size / Tensor parallel size` diff --git a/doc/usage.md b/doc/usage.md index 44116de..8c9a455 100644 --- a/doc/usage.md +++ b/doc/usage.md @@ -159,7 +159,7 @@ parallel = dict( - 当`size <= 0`,则 zero1 进程组的大小等于数据并行进程组的大小,因此优化器状态参数将在数据并行范围内分配 - 当`size == 1`,则不使用 zero1 ,所有数据并行组保留完整的优化器状态参数 - 当`size > 1`且`size <= data_parallel_world_size`,则 zero1 进程组是数据并行进程组的子集 -- pipeline:流水线并行大小,目前只支持 1,默认值为 1 +- pipeline:流水线并行大小,默认值为 1 - tensor:张量并行大小,通常是每个节点的 GPU 数量,默认值为 1 注意:`数据并行大小 = 总的 GPU 数目 / 流水线并行大小 / 张量并行大小` diff --git a/internlm/core/communication/__init__.py b/internlm/core/communication/__init__.py new file mode 100644 index 0000000..a42b9ea --- /dev/null +++ b/internlm/core/communication/__init__.py @@ -0,0 +1,32 @@ +from .p2p import ( + AsynCommunicator, + recv_backward, + recv_forward, + send_backward, + send_backward_and_recv_next_backward_async, + send_backward_recv_backward, + send_backward_recv_forward, + send_forward, + send_forward_and_recv_next_forward_async, + send_forward_backward_recv_forward_backward, + send_forward_recv_backward, + send_forward_recv_forward, +) +from .utils import recv_obj_meta, send_obj_meta + +__all__ = [ + "send_forward", + "send_forward_recv_forward", + "send_forward_backward_recv_forward_backward", + "send_backward", + "send_backward_recv_backward", + "send_backward_recv_forward", + "send_forward_recv_backward", + "recv_backward", + "recv_forward", + "send_obj_meta", + "recv_obj_meta", + "send_backward_and_recv_next_backward_async", + "send_forward_and_recv_next_forward_async", + "AsynCommunicator", +] diff --git a/internlm/core/communication/p2p.py b/internlm/core/communication/p2p.py new file mode 100644 index 0000000..e707661 --- /dev/null +++ b/internlm/core/communication/p2p.py @@ -0,0 +1,582 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/communication + +import operator +from functools import reduce +from typing import List, Tuple, Union + +import torch +import torch.distributed as dist + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.utils.common import get_current_device + +from .utils import gather_split_1d_tensor, split_tensor_into_1d_equal_chunks + +TensorShape = Union[torch.Size, List[int], Tuple[int]] + + +def _get_tensor_shape(tensor_shape: TensorShape, chunk_tensor: bool = False) -> Tuple[TensorShape, bool]: + """get the exact tensor shape when communicating and return whether the tensor is a chunk + + Args: + tensor_shape (:class:`torch.Size`): shape of tensor + chunk_tensor (bool, optional): whether to chunk tensor, defaults to False + + Returns: + Tuple[Union[:class:`torch.Size`, List[int], Tuple[int]], bool]: exact tensor shape, whether to chunk tensor + """ + if chunk_tensor: + tensor_chunk_shape = reduce(operator.mul, tensor_shape, 1) + tensor_parallel_world_size = gpc.get_world_size(ParallelMode.TENSOR) + if tensor_chunk_shape % tensor_parallel_world_size == 0: + tensor_chunk_shape = tensor_chunk_shape // tensor_parallel_world_size + else: + tensor_chunk_shape = tensor_shape + chunk_tensor = False + else: + tensor_chunk_shape = tensor_shape + return tensor_chunk_shape, chunk_tensor + + +def 
create_recv_buffer_with_shapes(recv_shapes, dtype, scatter_gather_tensors): + if isinstance(recv_shapes, torch.Size): + recv_chunk_shape, recv_split = _get_tensor_shape(recv_shapes, scatter_gather_tensors) + buffer_recv = torch.empty(recv_chunk_shape, requires_grad=True, device=get_current_device(), dtype=dtype) + return buffer_recv, recv_split + buffer_recv = [] + for recv_shape in recv_shapes: + recv_chunk_shape, recv_split = _get_tensor_shape(recv_shape, scatter_gather_tensors) + tensor_recv = torch.empty(recv_chunk_shape, requires_grad=True, device=get_current_device(), dtype=dtype) + buffer_recv.append(tensor_recv) + return buffer_recv, recv_split + + +def process_object_to_send(object_send, scatter_gather_tensors): + if isinstance(object_send, torch.Tensor): + send_split = _get_tensor_shape(object_send.shape, scatter_gather_tensors)[1] + if send_split: + object_send = split_tensor_into_1d_equal_chunks(object_send) + return object_send + + object_send_list = [] + for tensor_send in object_send: + send_split = _get_tensor_shape(tensor_send.shape, scatter_gather_tensors)[1] + if send_split: + object_send_list.append(split_tensor_into_1d_equal_chunks(tensor_send)) + else: + object_send_list.append(tensor_send) + object_send = tuple(object_send_list) + + return object_send + + +def filling_ops_queue(obj, comm_op, comm_rank, ops_queue): + if isinstance(obj, torch.Tensor): + op_to_add = dist.P2POp(comm_op, obj, comm_rank) + ops_queue.append(op_to_add) + else: + for tensor_to_comm in obj: + op_to_add = dist.P2POp(comm_op, tensor_to_comm, comm_rank) + ops_queue.append(op_to_add) + + +def _communicate( + object_send_next: Union[torch.Tensor, List[torch.Tensor]] = None, + object_send_prev: Union[torch.Tensor, List[torch.Tensor]] = None, + recv_prev: bool = False, + recv_next: bool = False, + recv_prev_shape: Union[torch.Size, List[torch.Size]] = None, + recv_next_shape: Union[torch.Size, List[torch.Size]] = None, + prev_rank: int = None, + next_rank: int = None, + dtype: torch.dtype = None, + scatter_gather_tensors: bool = False, +) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]: + """ + Adapted from megatron.p2p_communication. + Communicate tensors between stages. Used as helper method in other + communication methods that are used in pipeline schedule. + Takes the following arguments: + object_send_next (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to next rank + (no tensor sent if set to None). + object_send_prev (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): tensor to send to prev rank + (no tensor sent if set to None). + recv_prev (bool): boolean for whether tensor should be received from + previous rank. + recv_next (bool): boolean for whether tensor should be received from + next rank. + recv_prev_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape of the tensor to be received + from the previous stage, defaults to None. + recv_next_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): shape of the tensor to be received + from the next stage, defaults to None. 
+ prev_rank (int): the rank of the previous pipeline stage, defaults to None, + next_rank (int): the rank of the next pipeline stage, defaults to None, + dtype (torch.dtype): data type of intermediate buffers, defaults to None + scatter_gather_tensors (bool): whether to scatter and gather tensor between pipeline stages, defaults to False + + Returns: + Tuple[Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]]: returns tensor_recv_prev, tensor_recv_next + """ + + # Create placeholder tensors for receive in forward and backward directions + # if needed. + tensor_recv_prev = None + tensor_recv_next = None + + if recv_prev: + assert recv_prev_shape is not None + tensor_recv_prev, recv_prev_split = create_recv_buffer_with_shapes( + recv_prev_shape, dtype, scatter_gather_tensors + ) + + if recv_next: + assert recv_next_shape is not None + tensor_recv_next, recv_next_split = create_recv_buffer_with_shapes( + recv_next_shape, dtype, scatter_gather_tensors + ) + + if object_send_prev is not None or recv_prev: + if prev_rank is None: + prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) + + if object_send_next is not None or recv_next: + if next_rank is None: + next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + + if object_send_prev is not None: + object_send_prev = process_object_to_send(object_send_prev, scatter_gather_tensors) + + if object_send_next is not None: + object_send_next = process_object_to_send(object_send_next, scatter_gather_tensors) + + ops = [] + if object_send_prev is not None: + filling_ops_queue(object_send_prev, dist.isend, prev_rank, ops) + + if tensor_recv_prev is not None: + filling_ops_queue(tensor_recv_prev, dist.irecv, prev_rank, ops) + + if tensor_recv_next is not None: + filling_ops_queue(tensor_recv_next, dist.irecv, next_rank, ops) + + if object_send_next is not None: + filling_ops_queue(object_send_next, dist.isend, next_rank, ops) + + if len(ops) > 0: + reqs = dist.batch_isend_irecv(ops) + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv(). + torch.cuda.synchronize() + + if recv_prev and recv_prev_split: + if isinstance(tensor_recv_prev, torch.Tensor): + tensor_recv_prev = gather_split_1d_tensor(tensor_recv_prev).view(recv_prev_shape).requires_grad_() + else: + for index in range(len(tensor_recv_prev)): + tensor_recv_prev[index] = ( + gather_split_1d_tensor(tensor_recv_prev[index]).view(recv_prev_shape[index]).requires_grad_() + ) + + if recv_next and recv_next_split: + if isinstance(tensor_recv_next, torch.Tensor): + tensor_recv_next = gather_split_1d_tensor(tensor_recv_next).view(recv_next_shape).requires_grad_() + else: + for index in range(len(tensor_recv_next)): + tensor_recv_next[index] = ( + gather_split_1d_tensor(tensor_recv_next[index]).view(recv_next_shape[index]).requires_grad_() + ) + + return tensor_recv_prev, tensor_recv_next + + +def recv_forward( + input_tensor_shape, prev_rank=None, dtype=torch.float, scatter_gather_tensors=False +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Copy the forward output from the previous stage in pipeline as the input tensor of this stage. + + Args: + input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. + prev_rank (int, optional): The rank of the source of the tensor. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor or input tensor list. 
+ """ + input_tensor, _ = _communicate( + recv_prev=True, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + return input_tensor + + +def recv_backward( + output_grad_shape, next_rank=None, dtype=torch.float, scatter_gather_tensors=False +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Copy the gradient tensor from the next stage in pipeline as the input gradient of this stage. + + Args: + output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. + next_rank (int, optional): The rank of the source of the tensor. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor or gradient tensor list. + """ + _, output_tensor_grad = _communicate( + recv_next=True, + recv_next_shape=output_grad_shape, + next_rank=next_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + return output_tensor_grad + + +def send_forward(output_tensor, next_rank=None, scatter_gather_tensors=False) -> None: + """Sends the input tensor to the next stage in pipeline. + + Args: + output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. + next_rank (int, optional): The rank of the recipient of the tensor. + """ + _communicate(object_send_next=output_tensor, next_rank=next_rank, scatter_gather_tensors=scatter_gather_tensors) + + +def send_backward(input_tensor_grad, prev_rank=None, scatter_gather_tensors=False) -> None: + """Sends the gradient tensor to the previous stage in pipeline. + + Args: + input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent + prev_rank (int, optional): The rank of the recipient of the tensor + """ + + _communicate(object_send_prev=input_tensor_grad, prev_rank=prev_rank, scatter_gather_tensors=scatter_gather_tensors) + + +def send_forward_recv_backward( + output_tensor, output_grad_shape, next_rank=None, dtype=torch.float, scatter_gather_tensors=False +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Batched communication operation. Sends the input tensor to the + next stage in pipeline, while receives the gradient tensor from the + next stage in pipeline as the input gradient tensor of this stage. + + Args: + output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. + output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor. + """ + _, output_tensor_grad = _communicate( + object_send_next=output_tensor, + recv_next=output_grad_shape is not None, + recv_next_shape=output_grad_shape, + next_rank=next_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + + return output_tensor_grad + + +def send_backward_recv_forward( + input_tensor_grad, + input_tensor_shape, + prev_rank=None, + dtype=torch.float, + scatter_gather_tensors=False, +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Batched communication operation. Sends the gradient tensor to the + previous stage in pipeline, while receives the output tensor from the + previous stage in pipeline as the input of this stage. + + Args: + input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. + input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. 
+ + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor. + """ + input_tensor, _ = _communicate( + object_send_prev=input_tensor_grad, + recv_prev=input_tensor_shape is not None, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + + return input_tensor + + +def send_forward_recv_forward( + output_tensor, + input_tensor_shape, + prev_rank=None, + next_rank=None, + dtype=torch.float, + scatter_gather_tensors=False, +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Batched communication operation. Sends the input tensor to the + next stage in pipeline, while receives the output tensor from the + previous stage in pipeline as the input of this stage. + + Args: + output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. + input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input tensor. + """ + input_tensor, _ = _communicate( + object_send_next=output_tensor, + recv_prev=input_tensor_shape is not None, + recv_prev_shape=input_tensor_shape, + prev_rank=prev_rank, + next_rank=next_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + return input_tensor + + +def send_backward_recv_backward( + input_tensor_grad, + output_grad_shape, + prev_rank=None, + next_rank=None, + dtype=torch.float, + scatter_gather_tensors=False, +) -> Union[torch.Tensor, List[torch.Tensor]]: + """Batched communication operation. Sends the gradient tensor to the + previous stage in pipeline, while receives the gradient tensor from the + next member in pipeline as the input of this stage. + + Args: + input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor to be sent. + output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor + to be received. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: The input gradient tensor. + """ + _, output_tensor_grad = _communicate( + object_send_prev=input_tensor_grad, + recv_next=output_grad_shape is not None, + recv_next_shape=output_grad_shape, + prev_rank=prev_rank, + next_rank=next_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + return output_tensor_grad + + +def send_forward_backward_recv_forward_backward( + output_tensor, + input_tensor_grad, + input_tensor_shape, + output_grad_shape, + prev_rank=None, + next_rank=None, + dtype=torch.float, + scatter_gather_tensors=False, +) -> Tuple[Union[torch.Tensor, List[torch.Tensor]]]: + """Batched communication operation. Sends the input tensor to the next stage in pipeline and + the gradient tensor to the previous stage, while receives the input gradient tensor from the + next stage and the input tensor from the previous stage. + + Args: + output_tensor (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor sent to the next. + input_tensor_grad (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Tensor sent to the previous. + input_tensor_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor received + from the previous. + output_grad_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the tensor received + from the next. 
+ + Returns: + Tuple(Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]], Union[:class:`torch.Tensor`, + List[:class:`torch.Tensor`]]): (the input tensor, the input gradient tensor) + """ + input_tensor, output_tensor_grad = _communicate( + object_send_next=output_tensor, + object_send_prev=input_tensor_grad, + recv_prev=input_tensor_shape is not None, + recv_next=output_grad_shape is not None, + recv_prev_shape=input_tensor_shape, + recv_next_shape=output_grad_shape, + prev_rank=prev_rank, + next_rank=next_rank, + dtype=dtype, + scatter_gather_tensors=scatter_gather_tensors, + ) + return input_tensor, output_tensor_grad + + +def send_forward_and_recv_next_forward_async( + output_tensor, + recv_prev_shape: Union[torch.Size, List[torch.Size]] = None, + dtype: torch.dtype = None, + scatter_gather_tensors=False, +): + """send forward output to next rank and recv forward input from prev rank""" + + reqs = [] + tensor_recv_prev = None + + # prepare send operations + if output_tensor is not None: + next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + + output_tensor = process_object_to_send(output_tensor, scatter_gather_tensors) + + if isinstance(output_tensor, torch.Tensor): + reqs.append(dist.P2POp(dist.isend, output_tensor, next_rank)) + else: + for tensor_to_comm in output_tensor: + reqs.append(dist.P2POp(dist.isend, tensor_to_comm, next_rank)) + + # prepare receive operations + if recv_prev_shape is not None: + prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) + # create receive buffer + tensor_recv_prev, recv_prev_split = create_recv_buffer_with_shapes( + recv_prev_shape, dtype, scatter_gather_tensors + ) + # generate async receive operations + if isinstance(tensor_recv_prev, torch.Tensor): + reqs.append(dist.P2POp(dist.irecv, tensor_recv_prev, prev_rank)) + else: + for tensor_to_comm in tensor_recv_prev: + reqs.append(dist.P2POp(dist.irecv, tensor_to_comm, prev_rank)) + + if len(reqs) > 0: + reqs = dist.batch_isend_irecv(reqs) + + # return and do other things + yield + + # check communication completed + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv() + torch.cuda.synchronize() + + # Process received data + if recv_prev_shape is not None and recv_prev_split: + if isinstance(tensor_recv_prev, torch.Tensor): + tensor_recv_prev = gather_split_1d_tensor(tensor_recv_prev).view(recv_prev_shape).requires_grad_() + else: + for index in range(len(tensor_recv_prev)): + tensor_recv_prev[index] = ( + gather_split_1d_tensor(tensor_recv_prev[index]).view(recv_prev_shape[index]).requires_grad_() + ) + + yield tensor_recv_prev + + +def send_backward_and_recv_next_backward_async( + input_tensor, + recv_next_shape: Union[torch.Size, List[torch.Size]] = None, + dtype: torch.dtype = None, + scatter_gather_tensors=False, +): + reqs = [] + tensor_recv_next = None + + # prepare send operations + if input_tensor is not None: + prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) + + input_tensor = process_object_to_send(input_tensor, scatter_gather_tensors) + + if isinstance(input_tensor, torch.Tensor): + reqs.append(dist.P2POp(dist.isend, input_tensor, prev_rank)) + else: + for tensor_to_comm in input_tensor: + reqs.append(dist.P2POp(dist.isend, tensor_to_comm, prev_rank)) + + # prepare receive operations + if recv_next_shape is not None: + next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + # create receive buffer + tensor_recv_next, recv_next_split = create_recv_buffer_with_shapes( + recv_next_shape, dtype, 
scatter_gather_tensors + ) + # generate async receive opreations + if isinstance(tensor_recv_next, torch.Tensor): + reqs.append(dist.P2POp(dist.irecv, tensor_recv_next, next_rank)) + else: + for tensor_to_comm in tensor_recv_next: + reqs.append(dist.P2POp(dist.irecv, tensor_to_comm, next_rank)) + + if len(reqs) > 0: + reqs = dist.batch_isend_irecv(reqs) + + # return and do other things + yield + + # check communication completed + for req in reqs: + req.wait() + # To protect against race condition when using batch_isend_irecv() + torch.cuda.synchronize() + + # Process received data + if recv_next_shape is not None and recv_next_split: + if isinstance(tensor_recv_next, torch.Tensor): + tensor_recv_next = gather_split_1d_tensor(tensor_recv_next).view(recv_next_shape).requires_grad_() + else: + for index in range(len(tensor_recv_next)): + tensor_recv_next[index] = ( + gather_split_1d_tensor(tensor_recv_next[index]).view(recv_next_shape[index]).requires_grad_() + ) + + yield tensor_recv_next + + +class AsynCommunicator: + """AsynCommunicator for managing async communication.""" + + def __init__( + self, + tensor_to_send: Union[torch.Tensor, List[torch.Tensor]], + recv_shape: Union[torch.Size, List[torch.Size]], + dtype: torch.dtype = None, + scatter_gather_tensors=False, + forward: bool = True, + ) -> None: + self._need_receive = recv_shape is not None + + if forward: + self._coroutine = send_forward_and_recv_next_forward_async( + tensor_to_send, recv_shape, dtype, scatter_gather_tensors + ) + else: + self._coroutine = send_backward_and_recv_next_backward_async( + tensor_to_send, recv_shape, dtype, scatter_gather_tensors + ) + + @property + def need_receive(self) -> bool: + return self._need_receive + + def start(self) -> None: + next(self._coroutine) + + def wait_and_receive(self) -> Union[torch.Tensor, List[torch.Tensor]]: + received = next(self._coroutine) + self._coroutine.close() + + return received diff --git a/internlm/core/communication/utils.py b/internlm/core/communication/utils.py new file mode 100644 index 0000000..f413286 --- /dev/null +++ b/internlm/core/communication/utils.py @@ -0,0 +1,125 @@ +# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/communication + +from typing import List, Tuple, Union + +import torch +import torch.distributed as dist + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.utils.common import get_current_device + +TensorShape = Union[torch.Size, List[int], Tuple[int]] + + +def send_meta_helper(obj, next_rank, tensor_kwargs): + send_shape = torch.tensor(obj.size(), **tensor_kwargs) + send_ndims = torch.tensor(len(obj.size()), **tensor_kwargs) + dist.send(send_ndims, next_rank) + dist.send(send_shape, next_rank) + + +def send_obj_meta(obj, next_rank=None): + """Sends obj meta information before sending a specific obj. + Since the recipient must know the shape of the obj in p2p communications, + meta information of the obj should be sent before communications. This function + synchronizes with :func:`recv_obj_meta`. + + Args: + obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): obj to be sent. + need_meta (bool, optional): If False, meta information won't be sent. + next_rank (int): The rank of the next member in pipeline parallel group. 
+ + Returns: + bool: False + """ + if next_rank is None: + next_rank = gpc.get_next_global_rank(ParallelMode.PIPELINE) + + tensor_kwargs = {"dtype": torch.long, "device": get_current_device()} + if isinstance(obj, torch.Tensor): + send_obj_nums = torch.tensor(1, **tensor_kwargs) + dist.send(send_obj_nums, next_rank) + send_meta_helper(obj, next_rank, tensor_kwargs) + else: + send_obj_nums = torch.tensor(len(obj), **tensor_kwargs) + dist.send(send_obj_nums, next_rank) + for tensor_to_send in obj: + send_meta_helper(tensor_to_send, next_rank, tensor_kwargs) + + +def recv_meta_helper(prev_rank, tensor_kwargs): + recv_ndims = torch.empty((), **tensor_kwargs) + dist.recv(recv_ndims, prev_rank) + recv_shape = torch.empty(recv_ndims, **tensor_kwargs) + dist.recv(recv_shape, prev_rank) + return recv_shape + + +def recv_obj_meta(prev_rank=None) -> torch.Size: + """Receives obj meta information before receiving a specific obj. + Since the recipient must know the shape of the obj in p2p communications, + meta information of the obj should be received before communications. This function + synchronizes with :func:`send_obj_meta`. + + Args: + obj_shape (Union[:class:`torch.Size`, List[:class:`torch.Size`]]): The shape of the obj to be received. + prev_rank (int): The rank of the source of the obj. + + Returns: + Union[:class:`torch.Size`, List[:class:`torch.Size`]]: The shape of the obj to be received. + """ + if prev_rank is None: + prev_rank = gpc.get_prev_global_rank(ParallelMode.PIPELINE) + + tensor_kwargs = {"dtype": torch.long, "device": get_current_device()} + recv_obj_nums = torch.empty((), **tensor_kwargs) + dist.recv(recv_obj_nums, prev_rank) + if recv_obj_nums.item() == 1: + recv_shape = recv_meta_helper(prev_rank, tensor_kwargs) + obj_shape = torch.Size(recv_shape) + else: + obj_shape = [] + for _ in range(recv_obj_nums.item()): + recv_shape = recv_meta_helper(prev_rank, tensor_kwargs) + obj_shape.append(torch.Size(recv_shape)) + + return obj_shape + + +def split_tensor_into_1d_equal_chunks(tensor: torch.Tensor, new_buffer=False) -> torch.Tensor: + """Break a tensor into equal 1D chunks. + + Args: + tensor (:class:`torch.Tensor`): Tensor to be split before communication. + new_buffer (bool, optional): Whether to use a new buffer to store sliced tensor. + + Returns: + :class:`torch.Tensor`: The split tensor + """ + partition_size = torch.numel(tensor) // gpc.get_world_size(ParallelMode.TENSOR) + start_index = partition_size * gpc.get_local_rank(ParallelMode.TENSOR) + end_index = start_index + partition_size + if new_buffer: + data = torch.empty(partition_size, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False) + data.copy_(tensor.view(-1)[start_index:end_index]) + else: + data = tensor.view(-1)[start_index:end_index] + return data + + +def gather_split_1d_tensor(tensor: torch.Tensor) -> torch.Tensor: + """Opposite of above function, gather values from model parallel ranks. + + Args: + tensor (:class:`torch.Tensor`): Tensor to be gathered after communication. + Returns: + :class:`torch.Tensor`: The gathered tensor. 
+ """ + world_size = gpc.get_world_size(ParallelMode.TENSOR) + numel = torch.numel(tensor) + numel_gathered = world_size * numel + gathered = torch.empty(numel_gathered, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False) + chunks = [gathered[i * numel : (i + 1) * numel] for i in range(world_size)] + dist.all_gather(chunks, tensor, group=gpc.get_group(ParallelMode.TENSOR)) + return gathered diff --git a/internlm/core/context/parallel_context.py b/internlm/core/context/parallel_context.py index bc0346c..87d3114 100644 --- a/internlm/core/context/parallel_context.py +++ b/internlm/core/context/parallel_context.py @@ -464,7 +464,6 @@ class ParallelContext(metaclass=SingletonMeta): initializers.append(pgroup_initializer.Initializer_Zero1(*initializer_args)) if self.pipeline_parallel_size > 1: initializers.append(pgroup_initializer.Initializer_Pipeline(*initializer_args)) - for initializer in initializers: parallel_setting = initializer.init_dist_group() if isinstance(parallel_setting, list): diff --git a/internlm/core/naive_amp.py b/internlm/core/naive_amp.py index 845a984..7470659 100644 --- a/internlm/core/naive_amp.py +++ b/internlm/core/naive_amp.py @@ -73,6 +73,17 @@ class NaiveAMPModel(nn.Module): input_ = input_.float() return input_ + def convert_to_fp32(self, out): + """Converts the output to fp32""" + if isinstance(out, Tensor): + out = self._convert_to_fp32(out) + elif isinstance(out, (tuple, list)): + out = [self._convert_to_fp32(val) for val in out] + elif isinstance(out, dict): + out = {key: self._convert_to_fp32(val) for key, val in out.items()} + + return out + def _reduce_module_buffer(self): """ All-reduces the buffers (e.g., running stats of batch normalization) across @@ -121,10 +132,5 @@ class NaiveAMPModel(nn.Module): out = self.model(*args, **kwargs) if self._output_to_fp32: - if isinstance(out, Tensor): - out = self._convert_to_fp32(out) - elif isinstance(out, (tuple, list)): - out = [self._convert_to_fp32(val) for val in out] - elif isinstance(out, dict): - out = {key: self._convert_to_fp32(val) for key, val in out.items()} + out = self.convert_to_fp32(out) return out diff --git a/internlm/core/no_pipeline_scheduler.py b/internlm/core/no_pipeline_scheduler.py deleted file mode 100644 index 1f201e5..0000000 --- a/internlm/core/no_pipeline_scheduler.py +++ /dev/null @@ -1,279 +0,0 @@ -#!/usr/bin/env python -# -*- encoding: utf-8 -*- - -# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/engine -import inspect -from abc import ABC, abstractmethod -from typing import Any, Callable, Iterable - -import torch - -from internlm.core.engine import Engine -from internlm.utils.common import conditional_context - - -class BaseScheduler(ABC): - """A basic helper class to control the process of training or evaluation. - It mainly composes of forward_backward_step for gradient backward and - optimizer_step for parameters update. - For the convenience to enable FP16, we aggregate all codes that contain the - control of FP16 in class schedule. - - Args: - data_process_func (Callable, optional): The preprocessing function which receives a batch of data and arranges - them into data and label. - """ - - def __init__(self, data_process_func: Callable = None): - self.data_process_func = data_process_func - - @abstractmethod - def pre_processing(self, engine: Engine): - """To perform actions before running the schedule. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. 
- """ - pass - - @abstractmethod - def forward_backward_step( - self, - engine: Engine, - data_iter: Iterable, - forward_only: bool, - return_loss: bool = True, - return_output_label: bool = True, - ): - """The process function over a batch of dataset for training or evaluation. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. - data_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader). - forward_only (bool): If True, the process won't include backward. - return_loss (bool, optional): If False, the loss won't be returned. - return_output_label (bool, optional): If False, the output and label won't be returned. - """ - pass - - @staticmethod - def _call_engine(engine: Engine, inputs: Any): - """Calls the engine with the given inputs. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. - inputs (Any): The inputs to the engine, can be of type torch.Tensor, list, tuple, or dict. - """ - if isinstance(inputs, torch.Tensor): - return engine(inputs) - elif isinstance(inputs, (list, tuple)): - return engine(*inputs) - elif isinstance(inputs, dict): - return engine(**inputs) - else: - raise TypeError( - f"Expected engine inputs to be of type torch.Tensor, list, tuple, or dict, but got {type(inputs)}" - ) - - @staticmethod - def _call_engine_criterion(engine: Engine, outputs: Any, labels: Any): - """Calls the engine's criterion with the given outputs and labels. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. - outputs (Any): The outputs from the model, can be of type torch.Tensor, list, tuple, or dict. - labels (Any): The labels for the outputs, can be of type torch.Tensor, list, tuple, or dict. - """ - assert isinstance( - outputs, (torch.Tensor, list, tuple, dict) - ), f"Expect output of model is (torch.Tensor, list, tuple), got {type(outputs)}" - if isinstance(outputs, torch.Tensor): - outputs = (outputs,) - if isinstance(labels, torch.Tensor): - labels = (labels,) - - if isinstance(outputs, (tuple, list)) and isinstance(labels, (tuple, list)): - return engine.criterion(*outputs, *labels) - elif isinstance(outputs, (tuple, list)) and isinstance(labels, dict): - return engine.criterion(*outputs, **labels) - elif isinstance(outputs, dict) and isinstance(labels, dict): - return engine.criterion(**outputs, **labels) - elif isinstance(outputs, dict) and isinstance(labels, (list, tuple)): - raise ValueError(f"Expected labels to be a dict when the model outputs are dict, but got {type(labels)}") - else: - raise TypeError( - f"Expected model outputs and labels to be of type torch.Tensor ' \ - '(which is auto-converted to tuple), list, tuple, or dict, ' \ - 'but got {type(outputs)} (model outputs) and {type(labels)} (labels)" - ) - - -class NonPipelineScheduler(BaseScheduler): - """A helper schedule class for no pipeline parallelism running environment. - During one process, it loads a batch of dataset and feeds it to the model. - After getting the output and calculating the loss, it will use :meth:`step` - to update the parameters if it is in training mode. - - Args: - data_process_func (Callable, optional): The preprocessing function which receives a batch of data - and returns a tuple in the form of (data, label), and it will be executed in load_batch. - gradient_accumulation_steps(int, optional): the steps of gradient accumulation, 1 for disable - gradient accumulation. 
- - Example: - # this shows an example of customized data_process_func - def data_process_func(dataloader_output): - item1, item2, item3 = dataloader_output - data = (item1, item2) - label = item3 - return data, label - """ - - def __init__(self, data_process_func: Callable = None, gradient_accumulation_size: int = 1): - # check that non-pipeline schedule data process func only takes in one parameter - # which is the batch data - if data_process_func: - sig = inspect.signature(data_process_func) - assert len(sig.parameters) == 1, ( - "The data_process_func only takes in one parameter for NonPipelineSchedule, " - "which is a tuple of tensors for the current batch, " - "i.e. data_process_func(dataloader_output)." - ) - - self._grad_accum_size = gradient_accumulation_size - self._grad_accum_batch_size = 1 # static batch size for flash attetion. - self._grad_accum_offset = 0 - - super().__init__(data_process_func) - - def pre_processing(self, engine: Engine): - """Performs actions before running the schedule. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. - """ - pass - - def _load_accum_batch(self, data: Any, label: Any): - """Loads a batch of data and label for gradient accumulation. - - Args: - data (Any): The data to be loaded. - label (Any): The label to be loaded. - """ - _data = { - k: v[self._grad_accum_offset : self._grad_accum_offset + self._grad_accum_batch_size] - for k, v in data.items() - } - _label = label[self._grad_accum_offset : self._grad_accum_offset + self._grad_accum_batch_size] - - self._grad_accum_offset += self._grad_accum_batch_size - - return _data, _label - - def _train_one_batch( - self, - data: Any, - label: Any, - engine: Engine, - forward_only: bool = False, - return_loss: bool = True, - scale_loss: int = 1, - ): - """Trains one batch of data. - - Args: - data (Any): The data to be trained. - label (Any): The label for the data. - engine (internlm.core.Engine): InternLM engine for training and inference. - forward_only (bool, optional): If True, the model is run for the forward pass, else back propagation will - be executed. - return_loss (bool, optional): Loss will be returned if True. - scale_loss (int, optional): The scale factor for the loss. - """ - - # forward - with conditional_context(torch.no_grad(), enable=forward_only): - output = self._call_engine(engine, data) - - if return_loss: - loss = self._call_engine_criterion(engine, output, label) - loss /= scale_loss - - # backward - if not forward_only: - engine.backward(loss) - - if not return_loss: - loss = None - - return output, loss - - def forward_backward_step( - self, - engine: Engine, - data_iter: Iterable, - forward_only: bool = False, - return_loss: bool = True, - return_output_label: bool = True, - ): - """The process function that loads a batch of dataset and feeds it to the model. - The returned labels and loss will None if :attr:`return_loss` is False. - - Args: - engine (internlm.core.Engine): InternLM engine for training and inference. - data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). - forward_only (bool, optional): - If True, the model is run for the forward pass, else back propagation will be executed. - return_loss (bool, optional): Loss will be returned if True. - return_output_label (bool, optional): Output and label will be returned if True. - - Returns: - Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. 
- """ - assert ( - forward_only or return_loss - ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." - - batch_data, batch_size = engine.load_batch(data_iter) - - assert ( - batch_size == self._grad_accum_size - ), f"batch_size:{batch_size} must be equal to gradient accumulation steps:{self._grad_accum_size}" - - if self.data_process_func: - data, label = self.data_process_func(batch_data) - else: - # if not batch data process func is given, - # then we regard the batch data as a simple tuple of (data, label) - data, label = batch_data - - loss = 0 if return_loss else None - outputs = [] - labels = [] - - # reset accumulation microbatch offset - self._grad_accum_offset = 0 - - for _current_accum_step in range(self._grad_accum_size): - if _current_accum_step == self._grad_accum_size - 1: - engine.optimizer.skip_grad_reduce = False - else: - engine.optimizer.skip_grad_reduce = True - - _data, _label = self._load_accum_batch(data, label) - - _output, _loss = self._train_one_batch( - _data, _label, engine, forward_only, return_loss, self._grad_accum_size - ) - - if return_loss: - loss += _loss - if return_output_label: - outputs.append(_output) - labels.append(_label) - - if not return_output_label: - outputs, labels = None, None - - return outputs, labels, loss diff --git a/internlm/core/scheduler/__init__.py b/internlm/core/scheduler/__init__.py new file mode 100644 index 0000000..a9bf013 --- /dev/null +++ b/internlm/core/scheduler/__init__.py @@ -0,0 +1,12 @@ +from .base_scheduler import BaseScheduler, SchedulerHook, SchedulerMetricHook +from .no_pipeline_scheduler import NonPipelineScheduler +from .pipeline_scheduler import InterleavedPipelineScheduler, PipelineScheduler + +__all__ = [ + "BaseScheduler", + "NonPipelineScheduler", + "InterleavedPipelineScheduler", + "PipelineScheduler", + "SchedulerHook", + "SchedulerMetricHook", +] diff --git a/internlm/core/scheduler/base_scheduler.py b/internlm/core/scheduler/base_scheduler.py new file mode 100644 index 0000000..20b4460 --- /dev/null +++ b/internlm/core/scheduler/base_scheduler.py @@ -0,0 +1,187 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/engine + +from abc import ABC, abstractmethod +from typing import Any, Callable, Iterable, Optional + +import torch + +from internlm.core.engine import Engine +from internlm.utils.megatron_timers import megatron_timer as timer + + +class BaseScheduler(ABC): + """A basic helper class to control the process of training or evaluation. + It mainly composes of forward_backward_step for gradient backward and + optimizer_step for parameters update. + For the convenience to enable FP16, we aggregate all codes that contain the + control of FP16 in class schedule. + + Args: + data_process_func (Callable, optional): The preprocessing function which receives a batch of data and arranges + them into data and label. + """ + + def __init__(self, data_process_func: Callable = None): + self.data_process_func = data_process_func + + @abstractmethod + def pre_processing(self, engine: Engine): + """To perform actions before running the schedule. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. 
+ """ + pass + + def _load_micro_batch(self, data, label, offset, micro_bsz): + assert isinstance(data, dict) and isinstance(label, torch.Tensor) + micro_batch_data = {k: v[offset : offset + micro_bsz] for k, v in data.items()} + micro_batch_label = label[offset : offset + micro_bsz] + + return micro_batch_data, micro_batch_label + + @abstractmethod + def forward_backward_step( + self, + engine: Engine, + data_iter: Iterable, + forward_only: bool, + return_loss: bool = True, + return_output_label: bool = True, + ): + """The process function over a batch of dataset for training or evaluation. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. + data_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader). + forward_only (bool): If True, the process won't include backward. + return_loss (bool, optional): If False, the loss won't be returned. + return_output_label (bool, optional): If False, the output and label won't be returned. + """ + pass + + @staticmethod + def _call_engine(engine: Engine, inputs: Any): + """Calls the engine with the given inputs. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. + inputs (Any): The inputs to the engine, can be of type torch.Tensor, list, tuple, or dict. + """ + if isinstance(inputs, torch.Tensor): + return engine(inputs) + elif isinstance(inputs, (list, tuple)): + return engine(*inputs) + elif isinstance(inputs, dict): + return engine(**inputs) + else: + raise TypeError( + f"Expected engine inputs to be of type torch.Tensor, list, tuple, or dict, but got {type(inputs)}" + ) + + @staticmethod + def _call_engine_criterion(engine: Engine, outputs: Any, labels: Any): + """Calls the engine's criterion with the given outputs and labels. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. + outputs (Any): The outputs from the model, can be of type torch.Tensor, list, tuple, or dict. + labels (Any): The labels for the outputs, can be of type torch.Tensor, list, tuple, or dict. + """ + assert isinstance( + outputs, (torch.Tensor, list, tuple, dict) + ), f"Expect output of model is (torch.Tensor, list, tuple), got {type(outputs)}" + if isinstance(outputs, torch.Tensor): + outputs = (outputs,) + if isinstance(labels, torch.Tensor): + labels = (labels,) + + if isinstance(outputs, (tuple, list)) and isinstance(labels, (tuple, list)): + return engine.criterion(*outputs, *labels) + elif isinstance(outputs, (tuple, list)) and isinstance(labels, dict): + return engine.criterion(*outputs, **labels) + elif isinstance(outputs, dict) and isinstance(labels, dict): + return engine.criterion(**outputs, **labels) + elif isinstance(outputs, dict) and isinstance(labels, (list, tuple)): + raise ValueError(f"Expected labels to be a dict when the model outputs are dict, but got {type(labels)}") + else: + raise TypeError( + f"Expected model outputs and labels to be of type torch.Tensor ' \ + '(which is auto-converted to tuple), list, tuple, or dict, ' \ + 'but got {type(outputs)} (model outputs) and {type(labels)} (labels)" + ) + + +class SchedulerHook(ABC): + """ + Scheduler Hook. 
+ """ + + @abstractmethod + def before_forward(self, scheduler, inputs) -> None: + """Actions before forward""" + + @abstractmethod + def after_forward(self, scheduler, outputs) -> None: + """Actions after forward""" + + @abstractmethod + def before_criterion(self, scheduler, outputs, label) -> None: + """Actions before criterion""" + + @abstractmethod + def after_criterion(self, scheduler, loss) -> None: + """Actions after criterion""" + + @abstractmethod + def before_backward(self, scheduler, outputs, outputs_grad) -> None: + """Actions before backward""" + + @abstractmethod + def after_backward(self, scheduler, inputs_grad) -> None: + """Actions after backward""" + + @abstractmethod + def post_helper_func(self, scheduler, outputs, label) -> None: + """A post helper function""" + + +class SchedulerMetricHook(SchedulerHook): + """ + Scheduler Metric Hook. + """ + + def __init__(self, metric: Optional[Callable] = None, skip: bool = False) -> None: + self._post_func = metric + self._skip = skip + + def before_forward(self, scheduler, inputs) -> None: + if not self._skip: + timer("fwd").start() + + def after_forward(self, scheduler, outputs) -> None: + if not self._skip: + timer("fwd").stop() + + def before_criterion(self, scheduler, outputs, label) -> None: + if not self._skip: + timer("cal_loss").start() + + def after_criterion(self, scheduler, loss) -> None: + if not self._skip: + timer("cal_loss").stop() + + def before_backward(self, scheduler, outputs, outputs_grad) -> None: + if not self._skip: + timer("bwd").start() + + def after_backward(self, scheduler, inputs_grad) -> None: + if not self._skip: + timer("bwd").stop() + + def post_helper_func(self, scheduler, outputs, label) -> None: + if self._post_func is not None: + self._post_func(outputs, label) diff --git a/internlm/core/scheduler/no_pipeline_scheduler.py b/internlm/core/scheduler/no_pipeline_scheduler.py new file mode 100644 index 0000000..2633a9c --- /dev/null +++ b/internlm/core/scheduler/no_pipeline_scheduler.py @@ -0,0 +1,192 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/engine + +from typing import Any, Callable, Iterable, List, Optional + +import torch + +from internlm.core.engine import Engine +from internlm.utils.common import conditional_context + +from .base_scheduler import BaseScheduler, SchedulerHook + + +class NonPipelineScheduler(BaseScheduler): + """A helper schedule class for no pipeline parallelism running environment. + During one process, it loads a batch of dataset and feeds it to the model. + After getting the output and calculating the loss, it will use :meth:`step` + to update the parameters if it is in training mode. + + Args: + data_process_func (Callable, optional): The preprocessing function which receives a batch of data + and returns a tuple in the form of (data, label), and it will be executed in load_batch. + gradient_accumulation_steps(int, optional): the steps of gradient accumulation, 1 for disable + gradient accumulation. 
+ + Example: + # this shows an example of customized data_process_func + def data_process_func(dataloader_output): + item1, item2, item3 = dataloader_output + data = (item1, item2) + label = item3 + return data, label + """ + + def __init__( + self, + data_process_func: Callable = None, + gradient_accumulation_size: int = 1, + scheduler_hooks: Optional[List[SchedulerHook]] = None, + ): + self._grad_accum_size = gradient_accumulation_size + self._grad_accum_offset = 0 + + self._hooks = scheduler_hooks + + super().__init__(data_process_func) + + def pre_processing(self, engine: Engine): + """Performs actions before running the schedule. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. + """ + pass + + def _call_hooks(self, func_name: str, *args, **kwargs) -> None: + for hook in self._hooks: + getattr(hook, func_name)(self, *args, **kwargs) + + def _load_accum_batch(self, data: Any, label: Any): + """Loads a batch of data and label for gradient accumulation. + + Args: + data (Any): The data to be loaded. + label (Any): The label to be loaded. + """ + + _data, _label = self._load_micro_batch( + data=data, label=label, offset=self._grad_accum_offset, micro_bsz=self._grad_accum_batch_size + ) + self._grad_accum_offset += self._grad_accum_batch_size + + if self.data_process_func: + _data["input_ids"] = self.data_process_func(_data["input_ids"], _data["cu_seqlens"]) + _label = self.data_process_func(_label, _data["cu_seqlens"]) + _data.pop("cu_seqlens") + _data.pop("indexes") + + return _data, _label + + def _train_one_batch( + self, + data: Any, + label: Any, + engine: Engine, + forward_only: bool = False, + return_loss: bool = True, + scale_loss: int = 1, + ): + """Trains one batch of data. + + Args: + data (Any): The data to be trained. + label (Any): The label for the data. + engine (internlm.core.Engine): InternLM engine for training and inference. + forward_only (bool, optional): If True, the model is run for the forward pass, else back propagation will + be executed. + return_loss (bool, optional): Loss will be returned if True. + scale_loss (int, optional): The scale factor for the loss. + """ + + # forward + with conditional_context(torch.no_grad(), enable=forward_only): + self._call_hooks("before_forward", data) + output = self._call_engine(engine, data) + self._call_hooks("after_forward", output) + + self._call_hooks("post_helper_func", output, label) + + if return_loss: + self._call_hooks("before_criterion", output, label) + loss = self._call_engine_criterion(engine, output, label) + self._call_hooks("after_criterion", loss) + loss /= scale_loss + + # backward + if not forward_only: + self._call_hooks("before_backward", None, None) + engine.backward(loss) + self._call_hooks("after_backward", None) + + if not return_loss: + loss = None + + return output, loss + + def forward_backward_step( + self, + engine: Engine, + data_iter: Iterable, + forward_only: bool = False, + return_loss: bool = True, + return_output_label: bool = True, + ): + """The process function that loads a batch of dataset and feeds it to the model. + The returned labels and loss will None if :attr:`return_loss` is False. + + Args: + engine (internlm.core.Engine): InternLM engine for training and inference. + data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). + forward_only (bool, optional): + If True, the model is run for the forward pass, else back propagation will be executed. 
+ return_loss (bool, optional): Loss will be returned if True. + return_output_label (bool, optional): Output and label will be returned if True. + + Returns: + Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. + """ + assert ( + forward_only or return_loss + ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." + + batch_data, batch_size = engine.load_batch(data_iter) + + assert ( + batch_size % self._grad_accum_size == 0 + ), f"batch_size:{batch_size} must be an integer multiple of gradient accumulation steps:{self._grad_accum_size}" + self._grad_accum_batch_size = batch_size // self._grad_accum_size + + data, label = batch_data + + loss = 0 if return_loss else None + outputs = [] + labels = [] + + # reset accumulation microbatch offset + self._grad_accum_offset = 0 + + for _current_accum_step in range(self._grad_accum_size): + if _current_accum_step == self._grad_accum_size - 1: + engine.optimizer.skip_grad_reduce = False + else: + engine.optimizer.skip_grad_reduce = True + + _data, _label = self._load_accum_batch(data, label) + + _output, _loss = self._train_one_batch( + _data, _label, engine, forward_only, return_loss, self._grad_accum_size + ) + + if return_loss: + loss += _loss + if return_output_label: + outputs.append(_output) + labels.append(_label) + + if not return_output_label: + outputs, labels = None, None + + return outputs, labels, loss diff --git a/internlm/core/scheduler/pipeline_scheduler.py b/internlm/core/scheduler/pipeline_scheduler.py new file mode 100644 index 0000000..501794d --- /dev/null +++ b/internlm/core/scheduler/pipeline_scheduler.py @@ -0,0 +1,1293 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +# adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/engine + +from contextlib import contextmanager +from typing import Callable, List, Optional, Tuple, Union + +import torch.cuda + +import internlm.core.communication as comm +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.core.engine import Engine +from internlm.core.naive_amp import NaiveAMPModel +from internlm.utils.common import get_current_device, move_to_device +from internlm.utils.logger import get_logger + +from .base_scheduler import BaseScheduler, SchedulerHook + +logger = get_logger(__file__) + + +def get_tensor_shape(): + if hasattr(gpc.config, "TENSOR_SHAPE"): + return gpc.config.TENSOR_SHAPE + + if not gpc.is_initialized(ParallelMode.PIPELINE): + return None + + if hasattr(gpc.config, "SEQ_LEN") and hasattr(gpc.config.data, "micro_bsz") and hasattr(gpc.config, "HIDDEN_SIZE"): + if gpc.config.model.use_flash_attn: + if gpc.config.parallel.sequence_parallel: + sequence_world_size = gpc.get_world_size(ParallelMode.TENSOR) + tensor_shape = ( + gpc.config.SEQ_LEN * gpc.config.data["micro_bsz"] // sequence_world_size, + gpc.config.HIDDEN_SIZE, + ) + else: + tensor_shape = ( + gpc.config.SEQ_LEN * gpc.config.data["micro_bsz"], + gpc.config.HIDDEN_SIZE, + ) + else: + tensor_shape = ( + gpc.config.data["micro_bsz"], + gpc.config.SEQ_LEN, + gpc.config.HIDDEN_SIZE, + ) + return tensor_shape + else: + return None + + +def pack_return_tensors(return_tensors): + output, label = tuple(zip(*return_tensors)) + if isinstance(output[0], torch.Tensor): + output = torch.cat(output, dim=0) + elif isinstance(output[0], (list, tuple)): + output = tuple(torch.cat(tensors, dim=0) for tensors in zip(*output)) + else: + raise TypeError("Output of 
model must be tensor or list/tuple of tensors") + if isinstance(label[0], torch.Tensor): + label = torch.cat(label, dim=0) + else: + merged_label = {k: [] for k in label[0].keys()} + for d in label: + for k, v in d.items(): + merged_label[k].append(v) + label = {k: torch.cat(v, dim=0) for k, v in merged_label.items()} + return output, label + + +@contextmanager +def switch_virtual_pipeline_parallel_rank(rank): + prev_rank = gpc.virtual_pipeline_parallel_rank + try: + gpc.set_virtual_pipeline_parallel_rank(rank) + yield + finally: + gpc.set_virtual_pipeline_parallel_rank(prev_rank) + + +@contextmanager +def switch_optimizer_grad_sync_skip_mode(optimizer, skip: bool = True): + prev_mode = optimizer.skip_grad_reduce + try: + optimizer.skip_grad_reduce = skip + yield + finally: + optimizer.skip_grad_reduce = prev_mode + + +class PipelineScheduler(BaseScheduler): + """ + A helper schedule class for pipeline parallelism running environment. + It uses non-interleaved 1F1B strategy. Other properties are similar as + :class:`NonPipelineSchedule`. + + Args: + num_microbatches (int): The number of microbatches. + dtype (torch.dtype): Type of data. torch.float by default. + data_process_func (Callable, optional): + The post processing function which receives a micro batch of data, and it will be executed + in `load_micro_batch`. + tensor_shape (torch.Size, optional): Specified shape in pipeline communication. + scatter_gather_tensors (bool, optional): + If set to `True`, communication will be reduced over pipeline when using 1D tensor parallelization. + scheduler_hooks (Optional[List[SchedulerHook]], optional): List of scheduler hooks. + """ + + def __init__( + self, + num_microbatches: int, + dtype: torch.dtype = torch.float, + data_process_func: Callable = None, + tensor_shape: Union[torch.Size, List[int], Tuple[int]] = None, + scatter_gather_tensors: bool = False, + scheduler_hooks: Optional[List[SchedulerHook]] = None, + ): + assert num_microbatches > 0, f"expected num_microbatches to be larger then 1, but got {num_microbatches}" + + assert not isinstance( + tensor_shape, int + ), "tensor_shape type should be one of Union[torch.Size, List[int], Tuple[int]]." 
+ + super().__init__(data_process_func=data_process_func) + + self.num_microbatches = num_microbatches + self.dtype = dtype + self._hooks = scheduler_hooks + + self._tensor_shape = ( + tensor_shape if tensor_shape is None or isinstance(tensor_shape, torch.Size) else torch.Size(tensor_shape) + ) + + self.scatter_gather_tensors = ( + scatter_gather_tensors + and gpc.is_initialized(ParallelMode.TENSOR) + and gpc.get_world_size(ParallelMode.TENSOR) > 1 + ) + + if gpc.config.parallel.sequence_parallel: + self.scatter_gather_tensors = False + + # cache for the batch data + self.batch_data = None + + @property + def tensor_shape(self) -> torch.Size: + return self._tensor_shape + + @tensor_shape.setter + def tensor_shape(self, tensor_shape: torch.Size): + self._tensor_shape = tensor_shape + + def pre_processing(self, engine): + types = set() + + for param in engine.model.parameters(): + types.add(param.dtype) + assert len(types) == 1, f"Mixed types of parameter detected, {types}" + + self.dtype = types.pop() + + @staticmethod + def _call_engine(engine, data): # pylint: disable=W0237 + if data is None: + return None + + if isinstance(data, torch.Tensor): + return engine(data) + elif isinstance(data, (list, tuple)): + return engine(*data) + elif isinstance(data, dict): + stage_output = data.pop("stage_output", None) + + if stage_output is None: + return engine(**data) + elif isinstance(stage_output, torch.Tensor): + return engine(stage_output, **data) + elif isinstance(stage_output, (tuple, list)): + return engine(*stage_output, **data) + else: + raise TypeError( + f"Expected stage_output to be of type torch.Tensor, list, or tuple, " + f"but got {type(stage_output)}" + ) + else: + raise TypeError(f"Expected data to be of type torch.Tensor, list, tuple, or dict, but got {type(data)}") + + def load_batch(self, engine, data_iter): + # Pipeline schedule just puts data in memory + batch_data, batch_size = engine.load_batch(data_iter, to_gpu=False) + assert batch_size % self.num_microbatches == 0, "Batch size should divided by the number of microbatches" + + self.microbatch_offset = 0 + self.batch_size = batch_size + self.batch_data, self.batch_label = batch_data + self.microbatch_size = self.batch_size // self.num_microbatches + + def load_micro_batch(self): + micro_batch_data, micro_batch_label = self._load_micro_batch( + data=self.batch_data, label=self.batch_label, offset=self.microbatch_offset, micro_bsz=self.microbatch_size + ) + if self.data_process_func: + micro_batch_data["input_ids"] = self.data_process_func( + micro_batch_data["input_ids"], micro_batch_data["cu_seqlens"] + ) + micro_batch_label = self.data_process_func(micro_batch_label, micro_batch_data["cu_seqlens"]) + + micro_batch_data.pop("cu_seqlens") + micro_batch_data.pop("indexes") + + micro_batch_data["label"] = micro_batch_label + self.microbatch_offset += self.microbatch_size + + return move_to_device(micro_batch_data) + + def _get_data_label_for_current_step(self, stage_output, micro_batch_data): + if isinstance(micro_batch_data, (tuple, list)): + if gpc.is_first_rank(ParallelMode.PIPELINE): + # for the first stage, we use the data from the + # dataloader output by default + data, label = micro_batch_data + else: + # for non-first stage, we use the output passed + # by the previous as the model input + data = stage_output + _, label = micro_batch_data + elif isinstance(micro_batch_data, dict): + label = micro_batch_data.pop("label", None) + data = {"stage_output": stage_output, **micro_batch_data} + + return data, label + + def 
_call_hooks(self, func_name: str, *args, **kwargs) -> None: + for hook in self._hooks: + getattr(hook, func_name)(self, *args, **kwargs) + + def _get_current_microbatch_id(self, step_id: int) -> int: + """ + Get the current microbatch ID based on the step ID. + In 1f1b scheduler, the microbatch ID is the same as the step ID, + but it is important to note that the step ID is calculated separately + for forward and backward passes. + """ + return step_id + + def _forward_step(self, engine, input_obj, return_tensors, return_output_label=True, accum_loss=None): + """ + Forward step for passed-in model. If it is the first stage, the input tensor + is obtained from data_iterator, otherwise the passed-in input_obj is used. + Returns output tensor. This is a helper function and can be ignored by users. + + Args: + engine (colossalai.engine.Engine): Colossalai engine for training and inference. + input_obj (Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]): Input tensor for this pipeline stage. + return_tensors (List[:class:`torch.Tensor`]): A list of tensors to return. + return_output_label (bool, optional): Whether returns output labels. + accum_loss (optional): Where accumulated loss stores. + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: output or the loss value of the current + pipeline stage. + """ + micro_batch_data = self.load_micro_batch() + data, label = self._get_data_label_for_current_step(input_obj, micro_batch_data) + + self._call_hooks("before_forward", data) + output_obj = self._call_engine(engine.model, data) + self._call_hooks("after_forward", output_obj) + + if gpc.is_last_rank(ParallelMode.PIPELINE): + self._call_hooks("post_helper_func", output_obj, label) + if return_output_label: + return_tensors.append((output_obj, label)) + if accum_loss is not None: + self._call_hooks("before_criterion", output_obj, label) + loss = self._call_engine_criterion(engine, output_obj, label) + self._call_hooks("after_criterion", loss) + + loss_reduced = loss / self.num_microbatches + accum_loss.add_(loss_reduced.detach()) + output_obj = loss_reduced + + return output_obj + + def _backward_step(self, engine, step_id, input_obj, output_obj, output_obj_grad): + """ + Backward step through the passed-in output tensor. If it is the last stage, the + output_obj_grad is None, otherwise it is the gradients with respect to stage's output tensor. + Returns the gradients with respect to the input tensor (None if first stage). + This is a helper function and can be ignored by users. + + Args: + engine (colossalai.engine.Engine): Colossalai engine for training and inference. + step_id (int): The ID of the current step. + input_obj (Union[torch.Tensor, List[torch.Tensor]]): Input tensor for this stage. + output_obj (Union[torch.Tensor, List[torch.Tensor]]): Output tensor for this stage. + output_obj_grad (Union[torch.Tensor, List[torch.Tensor]]): Gradient of output tensor for this stage. + + Returns: + Union[torch.Tensor, List[torch.Tensor]]: Gradient of input tensor. + """ + + # Retain the grad on the input_obj. + if input_obj is not None: + if isinstance(input_obj, torch.Tensor): + input_obj.retain_grad() + else: + for in_tensor in input_obj: + if in_tensor is not None: + in_tensor.retain_grad() + + # Backward pass. + + # Only the last microbatch does syncing grad. 
+ skip_grad_sync = self._get_current_microbatch_id(step_id) != self.num_microbatches - 1 + + self._call_hooks("before_backward", output_obj, output_obj_grad) + with switch_optimizer_grad_sync_skip_mode(engine.optimizer, skip_grad_sync): + if output_obj_grad is None: + engine.backward(output_obj) + else: + engine.backward_by_grad(output_obj, output_obj_grad) + + # Collect the grad of the input_obj. + input_obj_grad = None + if input_obj is not None: + if isinstance(input_obj, torch.Tensor): + input_obj_grad = input_obj.grad + else: + input_obj_grad = [] + for in_tensor in input_obj: + input_obj_grad.append(in_tensor.grad) + self._call_hooks("after_backward", input_obj_grad) + + return input_obj_grad + + def _forward_only_step(self, engine, return_loss=True, return_output_label=True): + """ + This function performs forward only computation process. The scheduling of microbatches is similar to the + warmup phase, where each microbatch first receives the forward input from the previous stage, then performs + the forward computation, and finally passes the forward computation output to the next stage. There are two + special cases to note: + 1. The first stage of the pipeline does not need to receive forward input; its input comes from the dataloader. + 2. The last stage of the pipeline does not need to send forward output; its output is returned to the user code + for processing. + + Args: + engine (colossalai.engine.Engine): internlm engine for training and inference. + return_loss (bool, optional): Whether to return the accumulated loss. + return_output_label (bool, optional): Whether to return outputs and labels. + + Returns: + Tuple[Union[torch.Tensor, None], Union[torch.Tensor, None], Union[torch.Tensor, None]]: + output, label, and accumulated loss. + """ + + # Input, output tensors only need to be saved when doing backward passes + return_tensors = [] + accum_loss = ( + torch.zeros(1, device=get_current_device()) + if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True) + else None + ) + + # Used for tensor meta information communication + forward_recv_shapes = self.tensor_shape + need_forward_meta = self.tensor_shape is None + + # Run all forward passes. + for _ in range(self.num_microbatches): + # Receive input from the previous stage + if not gpc.is_first_rank(ParallelMode.PIPELINE): + if forward_recv_shapes is None: + forward_recv_shapes = comm.recv_obj_meta() + input_obj = comm.recv_forward( + forward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + input_obj = None + + # Perform forward computation + output_obj = self._forward_step( + engine, + input_obj, + return_tensors, + return_output_label=return_output_label, + accum_loss=accum_loss, + ) + + if not gpc.is_last_rank(ParallelMode.PIPELINE): + if need_forward_meta: + comm.send_obj_meta(output_obj) + need_forward_meta = False # send only once. + # Send the forward computation output to the next stage + comm.send_forward(output_obj, scatter_gather_tensors=self.scatter_gather_tensors) + + output, label = pack_return_tensors(return_tensors) if len(return_tensors) > 0 else (None, None) + + return output, label, accum_loss + + def _forward_backward_step(self, engine, return_loss=True, return_output_label=True): + """ + This function schedules the forward and backward computation of microbatches in the pipeline in a 1F1B manner. + It consists of three stages: warmup, 1F1B, and cooldown. + + 1. Warmup Stage: + The warmup stage performs num_warmup forward microsteps. 
The calculation of num_warmup is the pipeline length + minus the rank of the current pipeline minus 1. For each microstep, it receives data as input from the previous + stage, performs the forward computation, and then sends the result to the next stage. + + 2. 1F1B Stage: + The 1F1B stage consists of pairs of forward and backward microsteps. It performs num_1f1b_micropairs iterations, + where num_1f1b_micropairs is calculated as the total number of microbatches minus the number of microbatches in + the warmup stage. In each iteration, it first performs a forward computation, sends the result to the next + stage, receives input for the backward computation, performs the backward computation, and finally sends the + result to the previous stage to receive input for the next forward computation. + + 3. Cooldown Stage: + The cooldown stage performs the same number of iterations as the warmup stage. In each iteration, it receives + input for the backward computation, performs the backward computation, and finally sends the result to the + previous stage. + + There are two special cases to consider: + 1. The first stage of the pipeline does not need to receive forward input or send backward output. The last + stage does not need to send forward output or receive backward input. + 2. Pay attention to the communication between stages and use additional communication to bridge the gap. + + Args: + engine (Engine): The engine used for computation. + return_loss (bool, optional): Whether to return the accumulated loss. + return_output_label (bool, optional): Whether to return outputs and labels. + + Returns: + Tuple[Union[torch.Tensor, None], Union[torch.Tensor, None], Union[torch.Tensor, None]]: + The output, label, and accumulated loss. + """ + + num_warmup_microsteps = ( + gpc.get_world_size(ParallelMode.PIPELINE) - gpc.get_local_rank(ParallelMode.PIPELINE) - 1 + ) + num_warmup_microsteps = min(num_warmup_microsteps, self.num_microbatches) + num_1f1b_micropairs = self.num_microbatches - num_warmup_microsteps + + # Input, output tensors only need to be saved when doing backward passes + input_objs = [] + output_objs = [] + return_tensors = [] + accum_loss = ( + torch.zeros(1, device=get_current_device()) + if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True) + else None + ) + + # Used for tensor meta information communication + forward_recv_shapes = self.tensor_shape + backward_recv_shapes = None + need_forward_meta = self.tensor_shape is None + + # Run warmup forward passes. + for i in range(num_warmup_microsteps): + # Receive the input from the previous stage + if not gpc.is_first_rank(ParallelMode.PIPELINE): + if forward_recv_shapes is None: + forward_recv_shapes = comm.recv_obj_meta() + input_obj = comm.recv_forward( + forward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + input_obj = None + + # Perform forward computation + output_obj = self._forward_step( + engine, + input_obj, + return_tensors, + return_output_label=return_output_label, + accum_loss=accum_loss, + ) + + if not gpc.is_last_rank(ParallelMode.PIPELINE): + if isinstance(output_obj, torch.Tensor): + backward_recv_shapes = output_obj.shape + else: + backward_recv_shapes = [out_tensor.shape for out_tensor in output_obj] + + if need_forward_meta: + comm.send_obj_meta(output_obj) + need_forward_meta = False # send only once. 
+ + # Send the output of forward computation of this pipeline stage to the next pipeline stage as input for + # forward computation + if not gpc.is_last_rank(ParallelMode.PIPELINE): + comm.send_forward(output_obj, scatter_gather_tensors=self.scatter_gather_tensors) + + input_objs.append(input_obj) + output_objs.append(output_obj) + + # Before running 1F1B, need to receive first forward tensor. + # If all microbatches are run in warmup / cooldown phase, then no need to + # receive this tensor here. + if num_1f1b_micropairs > 0: + if not gpc.is_first_rank(ParallelMode.PIPELINE): + if forward_recv_shapes is None: + forward_recv_shapes = comm.recv_obj_meta(forward_recv_shapes) + input_obj = comm.recv_forward( + forward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + input_obj = None + + # Run 1F1B in steady state. + for i in range(num_1f1b_micropairs): + # Perform forward computation + output_obj = self._forward_step( + engine, + input_obj, + return_tensors, + return_output_label=return_output_label, + accum_loss=accum_loss, + ) + + if gpc.is_last_rank(ParallelMode.PIPELINE): + output_obj_grad = None + else: + output_obj_grad = comm.send_forward_recv_backward( + output_obj, + backward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + + # Add input_obj and output_obj to end of list. + input_objs.append(input_obj) + output_objs.append(output_obj) + + # Pop output_obj and output_obj from the start of the list for + # the backward pass. + input_obj = input_objs.pop(0) + output_obj = output_objs.pop(0) + + input_obj_grad = self._backward_step(engine, i, input_obj, output_obj, output_obj_grad) + + if i == (num_1f1b_micropairs - 1): + input_obj = None + if not gpc.is_first_rank(ParallelMode.PIPELINE): + comm.send_backward( + input_obj_grad, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + if gpc.is_first_rank(ParallelMode.PIPELINE): + input_obj = None + else: + input_obj = comm.send_backward_recv_forward( + input_obj_grad, + forward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + + # Run cooldown backward passes. + for i in range(num_warmup_microsteps): + input_obj = input_objs.pop(0) + output_obj = output_objs.pop(0) + + if not gpc.is_last_rank(ParallelMode.PIPELINE): + output_obj_grad = comm.recv_backward( + backward_recv_shapes, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + output_obj_grad = None + + input_obj_grad = self._backward_step( + engine, num_1f1b_micropairs + i, input_obj, output_obj, output_obj_grad + ) + + if not gpc.is_first_rank(ParallelMode.PIPELINE): + comm.send_backward(input_obj_grad, scatter_gather_tensors=self.scatter_gather_tensors) + + output, label = pack_return_tensors(return_tensors) if len(return_tensors) > 0 else (None, None) + + return output, label, accum_loss + + def forward_backward_step(self, engine, data_iter, forward_only=False, return_loss=True, return_output_label=True): + """Runs non-interleaved 1F1B schedule, with communication between pipeline stages. + Returns a tuple with losses if the last stage, an empty tuple otherwise. + + Args: + engine (colossalai.engine.Engine): Colossalai engine for training and inference. + data_iter (Iterable): Dataloader as the form of an iterator, obtained by calling iter(dataloader). + forward_only (bool, optional): + Whether run forward step only. Default is false. If true, no backward will be run. 
+ return_loss (bool, optional): Whether returns the loss value. Default is true. + return_output_label (bool, optional): If False, the output and label won't be returned. + Returns: + Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. + """ + + assert ( + forward_only or return_loss + ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." + + # Load data first + self.load_batch(engine, data_iter) + + if forward_only: + return self._forward_only_step(engine, return_loss, return_output_label) + else: + return self._forward_backward_step(engine, return_loss, return_output_label) + + +class InterleavedPipelineScheduler(PipelineScheduler): + """ + Interleaved Pipeline Scheduler. + """ + + def __init__( + self, + num_microbatches: int, + num_chunks: int, + dtype: torch.dtype = torch.float, + data_process_func: Callable = None, + tensor_shape: Union[torch.Size, List[int], Tuple[int]] = None, + scatter_gather_tensors: bool = False, + scheduler_hooks: Optional[List[SchedulerHook]] = None, + communication_overlap: bool = False, + ): + """A helper schedule class for pipeline parallelism running environment. + It uses interleaved 1F1B strategy. Other properties are similar as + :class:`NonPipelineSchedule`. + + Args: + num_microbatches (int): The number of microbatches. + num_chunks (int): The number of model chunks. + dtype (torch.dtype, optional): The data type of the tensors. Default is torch.float. + data_process_func (Callable, optional): + The preprocessing function which receives a batch of data, and it will be executed in `load_batch`. + tensor_shape (torch.Size, optional): Specified shape in pipeline communication. + scatter_gather_tensors (bool, optional): + If set to `True`, communication will be reduced over pipeline when using 1D tensor parallelization. + scheduler_hooks (List[SchedulerHook], optional): List of scheduler hooks. Default is None. + communication_overlap (bool, optional): Whether to enable communication overlap. Default is False. 
+ """ + assert ( + num_microbatches % gpc.get_world_size(ParallelMode.PIPELINE) == 0 + ), "num_microbatches must be an integer multiple of pipeline parallel world size" + + assert ( + isinstance(num_chunks, int) and num_chunks > 0 + ), f"expected num_chunks to be an integer and larger than 0, but got {num_chunks}" + + super().__init__( + num_microbatches, + dtype=dtype, + data_process_func=data_process_func, + tensor_shape=tensor_shape, + scatter_gather_tensors=scatter_gather_tensors, + scheduler_hooks=scheduler_hooks, + ) + + gpc.set_virtual_pipeline_parallel_size(num_chunks) + gpc.set_virtual_pipeline_parallel_rank(0) + + self._num_chunks = num_chunks + self._communication_overlap = communication_overlap + # switch 1f1b loop runner function according to communication overlap + self._run_1f1b_loop = ( + self._run_1f1b_loop_with_overlap if communication_overlap else self._run_1f1b_loop_without_overlap + ) + + # states + self._pp_size = gpc.get_world_size(ParallelMode.PIPELINE) + self._pp_rank = gpc.get_local_rank(ParallelMode.PIPELINE) + + self._accum_loss = None + self._return_tensors = None + self._input_objs = [[] for _ in range(num_chunks)] + self._output_objs = [[] for _ in range(num_chunks)] + self._output_obj_grads = [[] for _ in range(num_chunks)] + + self._input_obj_shapes = [self.tensor_shape for _ in range(num_chunks)] + self._output_obj_shapes = [None for _ in range(num_chunks)] + self._send_tensor_shape_flags = [self.tensor_shape is None for _ in range(num_chunks)] + + @property + def tensor_shape(self) -> torch.Size: + return self._tensor_shape + + @tensor_shape.setter + def tensor_shape(self, tensor_shape: torch.Size): + self._tensor_shape = tensor_shape + self._input_obj_shapes = [self._tensor_shape for _ in range(self._num_chunks)] + self._send_tensor_shape_flags = [self._tensor_shape is None for _ in range(self._num_chunks)] + + def _clear_state(self) -> None: + self._accum_loss = None + self._return_tensors = None + self._input_objs = [[] for _ in range(self._num_chunks)] + self._output_objs = [[] for _ in range(self._num_chunks)] + self._output_obj_grads = [[] for _ in range(self._num_chunks)] + + self._input_obj_shapes = [self.tensor_shape for _ in range(self._num_chunks)] + self._output_obj_shapes = [None for _ in range(self._num_chunks)] + self._send_tensor_shape_flags = [self.tensor_shape is None for _ in range(self._num_chunks)] + + def load_batch(self, engine, data_iter): + super().load_batch(engine, data_iter) + # overwrite microbatch_offset, since model chunks load the same microbatch, and should tract the offset + self.microbatch_offset = [0 for _ in range(self._num_chunks)] + + def load_micro_batch(self, model_chunk_id): + micro_batch_data, micro_batch_label = self._load_micro_batch( + data=self.batch_data, + label=self.batch_label, + offset=self.microbatch_offset[model_chunk_id], + micro_bsz=self.microbatch_size, + ) + micro_batch_data["label"] = micro_batch_label + self.microbatch_offset[model_chunk_id] += self.microbatch_size + return move_to_device(micro_batch_data) + + def _forward_step(self, engine, chunk_id): + """Forward step for passed-in model. If it is the first stage, the input tensor + is obtained from data_iterator, otherwise the passed-in input_obj is used. + Returns output tensor. This is a helper function and can be ignored by users. + + Args: + engine (colossalai.engine.Engine): Colossalai engine for training and inference. + chunk_id (int): The id of model chunks. 
+ Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: output or the loss value of the current + pipeline stage. + """ + gpc.set_virtual_pipeline_parallel_rank(chunk_id) + + if gpc.is_pipeline_first_stage() and len(self._input_objs[chunk_id]) == len(self._output_objs[chunk_id]): + self._input_objs[chunk_id].append(None) + input_obj = self._input_objs[chunk_id][-1] + + micro_batch_data = self.load_micro_batch(chunk_id) + data, label = self._get_data_label_for_current_step(input_obj, micro_batch_data) + + self._call_hooks("before_forward", data) + output_obj = self._call_engine(engine.model[chunk_id], data) + # Convert output_obj to fp32 when last model chunk of last stage + if gpc.is_pipeline_last_stage(ignore_virtual=False) and isinstance(engine.model[chunk_id], NaiveAMPModel): + output_obj = engine.model[chunk_id].convert_to_fp32(output_obj) + self._call_hooks("after_forward", output_obj) + + if gpc.is_pipeline_last_stage(): + self._call_hooks("post_helper_func", output_obj, label) + + if self._return_tensors is not None: + self._return_tensors.append((output_obj, label)) + if self._accum_loss is not None: + self._call_hooks("before_criterion", output_obj, label) + loss = self._call_engine_criterion(engine, output_obj, label) + self._call_hooks("after_criterion", loss) + + loss_reduced = loss / self.num_microbatches + self._accum_loss.add_(loss_reduced.detach()) + output_obj = loss_reduced + + self._output_objs[chunk_id].append(output_obj) + + return output_obj + + def _backward_step(self, engine, chunk_id, step_id): + """ + Backward step for passed-in model. If it is the last stage, the input tensor + is obtained from the previous forward step, otherwise the passed-in input_obj is used. + Returns input tensor gradient. This is a helper function and can be ignored by users. + + Args: + engine (colossalai.engine.Engine): Colossalai engine for training and inference. + chunk_id (int): The id of model chunks. + step_id (int): The current step id. + + Returns: + Union[:class:`torch.Tensor`, List[:class:`torch.Tensor`]]: input tensor gradient. 
+ """ + gpc.set_virtual_pipeline_parallel_rank(chunk_id) + + if gpc.is_pipeline_last_stage() and len(self._output_obj_grads[chunk_id]) == 0: + self._output_obj_grads[chunk_id].append(None) + + input_obj = self._input_objs[chunk_id].pop(0) + output_obj = self._output_objs[chunk_id].pop(0) + output_obj_grad = self._output_obj_grads[chunk_id].pop(0) + + input_obj_grad = super()._backward_step(engine, step_id, input_obj, output_obj, output_obj_grad) + + return input_obj_grad + + def _get_chunk_by_microbatch(self, step_id: int, backward: bool = False) -> int: + """Helper method to get the model chunk ID given the iteration number.""" + microbatch_id_in_group = step_id % (self._pp_size * self._num_chunks) + chunk_id = microbatch_id_in_group // self._pp_size + + if backward: + chunk_id = self._num_chunks - chunk_id - 1 + + return chunk_id + + def _get_current_microbatch_id(self, step_id: int) -> int: + # format: + # microstep_id : 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 + # microbatch_id: 1 2 3 4 1 2 3 4 5 6 7 8 5 6 7 8 + num_microbatch_group = step_id // (self._pp_size * self._num_chunks) + step_id_in_group = step_id % (self._pp_size * self._num_chunks) + + microbatch_id = num_microbatch_group * self._pp_size + step_id_in_group % self._pp_size + + return microbatch_id + + def _run_warmup_loop( + self, + engine: Engine, + num_microsteps: int, + num_warmup_microsteps: int, + receive_extra_backward: bool = False, + forward_only: bool = False, + ) -> None: + """ + Run the warm-up loop and prepare data for the 1F1B stage. + + During the warm-up process, for each execution, it first performs a forward computation, + and then sends the computation result to the next stage. + It also receives data for the next forward computation. + Since the input for the first forward computation is not considered initially, + it needs to receive data once at the beginning. + + After the warm-up is completed, we need to prepare data for the 1F1B stage. + The data preparation process should be consistent with the communication method of the 1F1B stage. + + Args: + engine (Engine): The engine to run the warm-up loop. + num_microsteps (int): The total number of microsteps. + num_warmup_microsteps (int): The number of warm-up microsteps. + receive_extra_backward (bool, optional): Whether to receive extra backward input for the 1F1B stage. + Default is False. + forward_only (bool, optional): Whether to only perform forward pass. Default is False. + """ + if not gpc.is_pipeline_first_stage(): + if self._input_obj_shapes[0] is None: + self._input_obj_shapes[0] = comm.recv_obj_meta(self._input_obj_shapes[0]) + self._input_objs[0].append( + comm.recv_forward( + self._input_obj_shapes[0], + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + ) + else: + self._input_objs[0].append(None) + + for k in range(num_warmup_microsteps): + chunk_id = self._get_chunk_by_microbatch(k) + + output_obj = self._forward_step(engine, chunk_id) + + if forward_only: + # when forward-only, no need to save tensors for a backward pass + self._input_objs[chunk_id].pop() + self._output_objs[chunk_id].pop() + + if not gpc.is_pipeline_last_stage(): + if isinstance(output_obj, torch.Tensor): + self._output_obj_shapes[chunk_id] = output_obj.shape + else: + self._output_obj_shapes[chunk_id] = [out_tensor.shape for out_tensor in output_obj] + + if self._send_tensor_shape_flags[chunk_id]: + comm.send_obj_meta(output_obj) + self._send_tensor_shape_flags[chunk_id] = False # send only once for each chunk. 
+ + # Determine if tensor should be received from previous stage. + next_forward_chunk_id = self._get_chunk_by_microbatch(k + 1) + + with switch_virtual_pipeline_parallel_rank(next_forward_chunk_id): + if not gpc.is_pipeline_first_stage() and self._input_obj_shapes[next_forward_chunk_id] is None: + self._input_obj_shapes[next_forward_chunk_id] = comm.recv_obj_meta() + if k == (num_microsteps - 1) or gpc.is_pipeline_first_stage(): + input_shape = None + else: + input_shape = self._input_obj_shapes[next_forward_chunk_id] + + # Don't send tensor downstream if on last stage. + if gpc.is_pipeline_last_stage(): + output_obj = None + + # Send and receive tensors as appropriate (send tensors computed + # in this iteration; receive tensors for next iteration). + if k != (num_warmup_microsteps - 1) or not receive_extra_backward: + # Normal warm-up communication process, or no need to prepare backward input for the 1F1B stage + input_obj = comm.send_forward_recv_forward( + output_obj, + input_shape, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + else: + # Receive output_obj_grad for next backward, if receive_extra_backward is True. + if self._communication_overlap: + # In this case, we should handle forward and backward communication separately, consistent with the + # overlap version of the 1F1B stage + input_obj = comm.send_forward_recv_forward( + output_obj, + input_shape, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + output_obj_grad = comm.send_backward_recv_backward( + None, # nothing to send + self._output_obj_shapes[self._num_chunks - 1], + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + self._output_obj_grads[self._num_chunks - 1].append(output_obj_grad) + else: + # In this case, we should handle forward and backward communication together, consistent with the + # non-overlap version of the 1F1B stage + input_obj, output_obj_grad = comm.send_forward_backward_recv_forward_backward( + output_obj, + None, # no backward grad to send + input_shape, + self._output_obj_shapes[self._num_chunks - 1], + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + self._output_obj_grads[self._num_chunks - 1].append(output_obj_grad) + + self._input_objs[next_forward_chunk_id].append(input_obj) + + def _run_1f1b_loop_with_overlap( + self, + engine: Engine, + num_warmup_microsteps: int, + num_1f1b_micropairs: int, + all_warmup_microsteps: bool = False, + ) -> None: + """ + Run the 1F1B loop with overlap. + + The 1F1B loop with overlap consists of the following steps: + 1. Perform the forward pass. + 2. Check if the backward input is ready. + 3. Send the forward output and receive the forward input for the next iteration. + 4. Perform the backward pass. + 5. Check if the forward input is ready. + 6. Send the backward output and receive the backward input for the next iteration. + + Args: + engine (Engine): The engine to run the 1F1B loop. + num_warmup_microsteps (int): The number of warm-up microsteps. + num_1f1b_micropairs (int): The number of 1F1B micropairs. + all_warmup_microsteps (bool, optional): Whether to run all warm-up microsteps. Default is False. + """ + + backward_async_communicator = None + + # Run 1F1B in steady state. 
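+        # In the steady state below, iteration k pairs the forward pass of microstep
+        # k + num_warmup_microsteps with the backward pass of microstep k, so each
+        # micropair retires one stored activation for every new one it creates.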
+ for k in range(num_1f1b_micropairs): + forward_microstep_id = k + num_warmup_microsteps + backward_microstep_id = k + forward_chunk_id = self._get_chunk_by_microbatch(forward_microstep_id) + backward_chunk_id = self._get_chunk_by_microbatch(backward_microstep_id, backward=True) + + # 1. Forward pass. + output_obj = self._forward_step(engine, forward_chunk_id) + + # 2. Check if the backward input is ready. + if backward_async_communicator is not None: + output_obj_grad = backward_async_communicator.wait_and_receive() + + if backward_async_communicator.need_receive: + self._output_obj_grads[backward_chunk_id].append(output_obj_grad) + + # 3. Send the forward outputs and receive the forward inputs from the previous rank. + + # Check if it is the last model chunk of the last pipeline stage, no need to send forward output. + gpc.set_virtual_pipeline_parallel_rank(forward_chunk_id) + if gpc.is_pipeline_last_stage(): + output_obj = None + + # Check if it needs to receive the results from the previous rank. + next_forward_chunk_id = self._get_chunk_by_microbatch(forward_microstep_id + 1) + with switch_virtual_pipeline_parallel_rank(next_forward_chunk_id): + if gpc.is_pipeline_first_stage() or k == num_1f1b_micropairs - 1: + input_obj_shape = None + else: + input_obj_shape = self._input_obj_shapes[next_forward_chunk_id] + + forward_async_communicator = comm.AsynCommunicator( + output_obj, + input_obj_shape, + self.dtype, + self.scatter_gather_tensors, + forward=True, + ) + forward_async_communicator.start() + + # 5. Backward pass. + + input_obj_grad = self._backward_step(engine, backward_chunk_id, backward_microstep_id) + + input_obj = forward_async_communicator.wait_and_receive() + if forward_async_communicator.need_receive: + self._input_objs[next_forward_chunk_id].append(input_obj) + + # 6. Send the backward output and receive the backward input for the next iteration. + gpc.set_virtual_pipeline_parallel_rank(backward_chunk_id) + if gpc.is_pipeline_first_stage(): + input_obj_grad = None + + next_backward_chunk_id = self._get_chunk_by_microbatch(backward_microstep_id + 1, backward=True) + with switch_virtual_pipeline_parallel_rank(next_backward_chunk_id): + if gpc.is_pipeline_last_stage(): + output_obj_shape = None + else: + output_obj_shape = self._output_obj_shapes[next_backward_chunk_id] + + backward_async_communicator = comm.AsynCommunicator( + input_obj_grad, + output_obj_shape, + self.dtype, + self.scatter_gather_tensors, + forward=False, + ) + backward_async_communicator.start() + + if all_warmup_microsteps: + if not gpc.is_pipeline_last_stage(): + self._output_obj_grads[self._num_chunks - 1].append( + comm.recv_backward( + self._output_obj_shapes[self._num_chunks - 1], + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + ) + else: + self._output_obj_grads[self._num_chunks - 1].append(None) + else: + output_obj_grad = backward_async_communicator.wait_and_receive() + if backward_async_communicator.need_receive: + backward_chunk_id = self._get_chunk_by_microbatch(num_1f1b_micropairs, backward=True) + self._output_obj_grads[backward_chunk_id].append(output_obj_grad) + + def _run_1f1b_loop_without_overlap( + self, + engine: Engine, + num_warmup_microsteps: int, + num_1f1b_micropairs: int, + all_warmup_microsteps: bool = False, + ) -> None: + """ + Run the 1F1B loop without overlap. + + The 1F1B loop without overlap consists of the following steps: + 1. Perform the forward pass. + 2. Perform the backward pass. + 3. 
Send the forward output of this iteration to the next stage, and send the backward output of this iteration + to the previous stage, + and receive the forward and backward inputs for the next iteration. + + Args: + engine (Engine): The engine to use for computation. + num_warmup_microsteps (int): The number of warmup microsteps. + num_1f1b_micropairs (int): The number of 1F1B micro-pairs. + all_warmup_microsteps (bool, optional): Whether to run all warmup microsteps. Defaults to False. + """ + for k in range(num_1f1b_micropairs): + # Forward pass. + forward_microstep_id = k + num_warmup_microsteps + forward_chunk_id = self._get_chunk_by_microbatch(forward_microstep_id) + output_obj = self._forward_step(engine, forward_chunk_id) + + # Backward pass. + backward_microstep_id = k + backward_chunk_id = self._get_chunk_by_microbatch(backward_microstep_id, backward=True) + input_obj_grad = self._backward_step(engine, backward_chunk_id, backward_microstep_id) + + # Send output_obj and input_obj_grad, receive input_obj + # and output_obj_grad. + + # Determine if current stage has anything to send in either direction, + # otherwise set obj to None. + gpc.set_virtual_pipeline_parallel_rank(forward_chunk_id) + if gpc.is_pipeline_last_stage(): + output_obj = None + + gpc.set_virtual_pipeline_parallel_rank(backward_chunk_id) + if gpc.is_pipeline_first_stage(): + input_obj_grad = None + + # Determine if peers are sending, and where in data structure to put + # received tensors. + next_forward_chunk_id = self._get_chunk_by_microbatch(forward_microstep_id + 1) + with switch_virtual_pipeline_parallel_rank(next_forward_chunk_id): + if gpc.is_pipeline_first_stage() or k == num_1f1b_micropairs - 1: + recv_prev = False + else: + recv_prev = True + + next_backward_chunk_id = self._get_chunk_by_microbatch(backward_microstep_id + 1, backward=True) + with switch_virtual_pipeline_parallel_rank(next_backward_chunk_id): + if gpc.is_pipeline_last_stage(): + recv_next = False + else: + recv_next = True + + input_shape = self._input_obj_shapes[next_forward_chunk_id] if recv_prev else None + output_shape = self._output_obj_shapes[next_backward_chunk_id] if recv_next else None + + # Communicate objs. + input_obj, output_obj_grad = comm.send_forward_backward_recv_forward_backward( + output_obj, + input_obj_grad, + input_shape, + output_shape, + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + + # Put input_obj and output_obj_grad in data structures in the + # right location. + if recv_prev: + self._input_objs[next_forward_chunk_id].append(input_obj) + if recv_next: + self._output_obj_grads[next_backward_chunk_id].append(output_obj_grad) + + # receive necessary data for next cooldown loop + if all_warmup_microsteps: + if not gpc.is_pipeline_last_stage(): + self._output_obj_grads[self._num_chunks - 1].append( + comm.recv_backward( + self._output_obj_shapes[self._num_chunks - 1], + dtype=self.dtype, + scatter_gather_tensors=self.scatter_gather_tensors, + ) + ) + else: + self._output_obj_grads[self._num_chunks - 1].append(None) + + def _run_cooldown_loop(self, engine: Engine, num_microsteps: int, num_1f1b_micropairs: int) -> None: + """ + Run the cooldown loop. + + The cooldown loop consists of the following steps: + 1. Perform the backward step. + 2. Send the backward output to the next stage and receive inputs for next backward. + + Args: + engine (Engine): The engine to use for computation. + num_microsteps (int): The total number of microsteps. 
+            num_1f1b_micropairs (int): The number of 1F1B micro-pairs.
+        """
+        for k in range(num_1f1b_micropairs, num_microsteps):
+            chunk_id = self._get_chunk_by_microbatch(k, backward=True)
+
+            input_obj_grad = self._backward_step(engine, chunk_id, k)
+
+            next_backward_chunk_id = self._get_chunk_by_microbatch(k + 1, backward=True)
+
+            if k != (num_microsteps - 1) and not (
+                gpc.is_pipeline_last_stage(ignore_virtual=True) and next_backward_chunk_id == (self._num_chunks - 1)
+            ):
+                output_shape = self._output_obj_shapes[next_backward_chunk_id]
+            else:
+                output_shape = None
+
+            self._output_obj_grads[next_backward_chunk_id].append(
+                comm.send_backward_recv_backward(
+                    input_obj_grad,
+                    output_shape,
+                    dtype=self.dtype,
+                    scatter_gather_tensors=self.scatter_gather_tensors,
+                )
+            )
+
+    def _forward_only_step(self, engine: Engine):
+        num_microsteps = self.num_microbatches * self._num_chunks
+        num_warmup_microsteps = num_microsteps
+
+        self._run_warmup_loop(
+            engine,
+            num_microsteps,
+            num_warmup_microsteps,
+            receive_extra_backward=False,
+            forward_only=True,
+        )
+
+    def _forward_backward_step(self, engine: Engine):
+        # Compute number of warmup and remaining microbatches.
+        all_warmup_microsteps = False
+        num_microsteps = self.num_microbatches * self._num_chunks
+
+        # Run all forward passes and then all backward passes if the number of
+        # microbatches equals the number of pipeline stages.
+        # Otherwise, perform (num_chunks - 1) * pipeline_parallel_size microbatches
+        # on all workers, followed by additional microbatches depending on the
+        # stage ID (more forward passes for earlier stages; later stages can
+        # immediately start with 1F1B).
+        if self.num_microbatches == self._pp_size:
+            num_warmup_steps = num_microsteps
+            all_warmup_microsteps = True
+        else:
+            num_warmup_steps = (self._pp_size - self._pp_rank - 1) * 2
+            num_warmup_steps += (self._num_chunks - 1) * self._pp_size
+            num_warmup_steps = min(num_warmup_steps, num_microsteps)
+        num_1f1b_micropairs = num_microsteps - num_warmup_steps
+
+        # We usually need to prepare extra backward input data for the 1F1B stage when the warm-up stage ends,
+        # because the 1F1B stage typically performs one forward and one backward pass together,
+        # except in the following cases:
+        receive_extra_backward = not (
+            all_warmup_microsteps  # Only warmup microsteps
+            or gpc.is_pipeline_last_stage(ignore_virtual=True)  # The rank is the last pipeline stage
+        )
+
+        # 1. Warmup
+        self._run_warmup_loop(
+            engine,
+            num_microsteps,
+            num_warmup_steps,
+            receive_extra_backward=receive_extra_backward,
+        )
+
+        # 2. 1F1B
+        self._run_1f1b_loop(
+            engine,
+            num_warmup_steps,
+            num_1f1b_micropairs=num_1f1b_micropairs,
+            all_warmup_microsteps=all_warmup_microsteps,
+        )
+
+        # 3. Cooldown
+        self._run_cooldown_loop(engine, num_microsteps, num_1f1b_micropairs=num_1f1b_micropairs)
+
+    def forward_backward_step(self, engine, data_iter, forward_only=False, return_loss=True, return_output_label=True):
+        """Run interleaved 1F1B schedule (model split into model chunks), with
+        communication between pipeline stages as needed.
+
+        Args:
+            engine (colossalai.engine.Engine): Colossalai engine for training and inference.
+            data_iter (Iterable): Dataloader in the form of an iterator, obtained by calling iter(dataloader).
+            forward_only (bool, optional):
+                Whether to run the forward step only. Default is False. If True, no backward pass will be run.
+            return_loss (bool, optional): Whether to return the loss value. Default is True.
+            return_output_label (bool, optional): If False, the output and label won't be returned.
+ + Returns: + Tuple[:class:`torch.Tensor`]: A tuple of (output, label, loss), loss and label could be None. + The loss would be returned only in the last stage. + """ + assert ( + forward_only or return_loss + ), "The argument 'return_loss' has to be True when 'forward_only' is False, but got False." + + gpc.set_virtual_pipeline_parallel_rank(0) + + self.load_batch(engine, data_iter) + + if return_loss and gpc.is_pipeline_last_stage(ignore_virtual=True): + self._accum_loss = torch.zeros(1, device=get_current_device()) + if return_output_label: + self._return_tensors = [] + + if forward_only: + self._forward_only_step(engine) + else: + self._forward_backward_step(engine) + + if return_output_label and len(self._return_tensors) > 0: + output, label = pack_return_tensors(self._return_tensors) + else: + output, label = (None, None) + accum_loss = self._accum_loss + + self._clear_state() + + return output, label, accum_loss diff --git a/internlm/core/trainer.py b/internlm/core/trainer.py index a10d03e..a027fed 100644 --- a/internlm/core/trainer.py +++ b/internlm/core/trainer.py @@ -7,7 +7,12 @@ import json from typing import Iterable, Optional from internlm.core.engine import Engine -from internlm.core.no_pipeline_scheduler import BaseScheduler, NonPipelineScheduler +from internlm.core.scheduler import ( + BaseScheduler, + InterleavedPipelineScheduler, + NonPipelineScheduler, + PipelineScheduler, +) class TrainState: @@ -33,6 +38,11 @@ class TrainState: # Total step count self.total_steps: int = config.data.total_steps + # resume tensorboard folder, need load from checkpoint or set manually. + self.resume_tb_folder = config.resume_tb_folder + + self.tensorboard_folder = config.tensorboard_folder + def init_batch_sampler(self, train_dl): # Copy of the batch sampler from the DataLoader self.batch_sampler = train_dl.batch_sampler.copy() @@ -71,6 +81,9 @@ class TrainState: self.batch_sampler = train_dl.batch_sampler.copy() self.batch_sampler_iter = iter(self.batch_sampler) + # resume tensorboard from older tensorboard_folder + self.resume_tb_folder = other_stuffs.get("tensorboard_folder", None) + def state_dict(self): return { "batch_count": self.batch_count, @@ -78,6 +91,7 @@ class TrainState: "num_consumed_tokens": self.num_consumed_tokens, "inf_nan_skip_batches": self.inf_nan_skip_batches, "step_count": self.step_count, + "tensorboard_folder": self.tensorboard_folder, } @@ -112,8 +126,7 @@ class Trainer: ), f"expected schedule to be of type BaseSchedule, but got {type(schedule)}" self._schedule = schedule - if self.uses_pipeline: - self._schedule.pre_processing(self) + self._schedule.pre_processing(self._engine) @property def engine(self): @@ -126,7 +139,7 @@ class Trainer: @property def uses_pipeline(self): """Returns whether the pipeline parallel is used or not.""" - return False + return isinstance(self._schedule, (PipelineScheduler, InterleavedPipelineScheduler)) def train(self): self._engine.train() diff --git a/internlm/data/batch_sampler.py b/internlm/data/batch_sampler.py index 1ee4126..16fd6fc 100644 --- a/internlm/data/batch_sampler.py +++ b/internlm/data/batch_sampler.py @@ -219,11 +219,6 @@ class StaticBatchSampler: assert ( batch_size - self.start_bsz ) % self.bsz_incre == 0, f"{batch_size} - {self.start_bsz} should be multiple of {self.bsz_incre}" - assert ( - self.start_bsz // micro_bsz >= 4 - ), f"Must have more start samples:`{self.start_bsz}` with micro_bsz:\ - `{micro_bsz}`, so that the pipeline can run correctly" - assert batch_size % micro_bsz == 0, 
f"batch_size({batch_size}) should be multiple of micro_bsz({micro_bsz})" assert ( self.start_bsz % micro_bsz == 0 diff --git a/internlm/data/dataset.py b/internlm/data/dataset.py new file mode 100644 index 0000000..401e510 --- /dev/null +++ b/internlm/data/dataset.py @@ -0,0 +1,56 @@ +import os +from typing import Dict + +from torch.utils.data import ConcatDataset + +from internlm.data.single_dataset import JsonlDataset + + +def get_dataset_dict(folder, split="valid") -> Dict: + """ + Return a dictionary of Datasets from a folder containing data files for validation. + + Args: + folder (str): The path to the folder containing data files. + split (str): The split of the data files to be used, default is "valid". + + Returns: + A dictionary containing Datasets for each folder in the given path + that contains data files with the specified split. + + Raises: + AssertionError: If the given folder does not exist. + + Example: + If the given folder is as follows, + - data + - zhihu + - xxx.bin + - valid.bin + - baike + - xxx.bin + - valid.bin + + The returned dictionary will be, + { + 'zhihu': Dataset, + 'baike': Dataset + } + """ + + assert os.path.exists(folder), f"folder `{folder}` not exists" + data_dict = {} + + for root, dirs, files in os.walk(folder, followlinks=True): + dirs.sort() # The order is guaranteed, and the newly added data starting with z needs to be ranked behind + datasets = [] + for fn in sorted(files): # Need sorted to ensure that the order is consistent + if fn.endswith(".bin") and split in fn: + fp = os.path.join(root, fn) + ds = JsonlDataset(fp) + datasets.append(ds) + if datasets: + ds = ConcatDataset(datasets=datasets) + data_dict[os.path.basename(root)] = ds + + return data_dict diff --git a/internlm/data/packed_dataset.py b/internlm/data/packed_dataset.py index e9151bf..c0d689f 100644 --- a/internlm/data/packed_dataset.py +++ b/internlm/data/packed_dataset.py @@ -144,6 +144,48 @@ class PackedDataset(torch.utils.data.Dataset): out = {"tokens": pack, "cu_seqlens": cu_seqlens, "indexes": indexes, "labels": labels, "type_ids": type_ids} return out + def cal_pos_unpack(self, index): + if index == 0: + pre_pos = 0 + else: + pre_pos = index * gpc.config.data["micro_bsz"] + + pos = (index + 1) * gpc.config.data["micro_bsz"] + return pre_pos, pos + + def build_unpack(self, index): + + pre_pos, pos = self.cal_pos_unpack(index) + + pack, cu_seqlens, indexes, labels, type_ids = [], [0], [], [], [] + + while pre_pos < pos and pre_pos < len(self.dataset): + sample_idx = self.sample_indices[pre_pos] + sample = self.dataset[sample_idx] + length = min(len(sample["tokens"]), self.max_length_per_sample) + chunk = sample["tokens"][0:length] + pack.extend(chunk) + _labels = deepcopy(chunk) + _labels = list(_labels[1:]) + [-100] + assert len(_labels) == len(chunk), (_labels, chunk) + labels.extend(_labels) + type_ids.extend([sample.get("type_id", 0)] * len(chunk)) + cu_seqlens.append(cu_seqlens[-1] + len(chunk)) + indexes.extend(list(range(length))) + pre_pos = pre_pos + 1 + + if cu_seqlens[-1] != self.packed_length: + pack = pack + [0] * (self.packed_length - cu_seqlens[-1]) + labels = labels + [0] * (self.packed_length - cu_seqlens[-1]) + type_ids = type_ids + [0] * (self.packed_length - cu_seqlens[-1]) + indexes.extend(list(range(self.packed_length - cu_seqlens[-1]))) + cu_seqlens.append(self.packed_length) + + assert len(pack) == self.packed_length + + out = {"tokens": pack, "cu_seqlens": cu_seqlens, "indexes": indexes, "labels": labels, "type_ids": type_ids} + return out + def 
__getitem__(self, item: int) -> Dict: """Given the index, it returns a dict as { @@ -154,8 +196,11 @@ class PackedDataset(torch.utils.data.Dataset): } """ - pos_before, token_id_before, pos_after, token_id_after = self.mapping(item) - return self.build_pack(pos_before, token_id_before, pos_after, token_id_after) + if gpc.config.model.use_flash_attn: + pos_before, token_id_before, pos_after, token_id_after = self.mapping(item) + return self.build_pack(pos_before, token_id_before, pos_after, token_id_after) + + return self.build_unpack(item) class PackedDatasetWithoutCuSeqlen(torch.utils.data.Dataset): diff --git a/internlm/data/utils.py b/internlm/data/utils.py index b003469..724fb9f 100644 --- a/internlm/data/utils.py +++ b/internlm/data/utils.py @@ -1,7 +1,11 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- -DATASET_TYPE_IDS_MAP = {"en": 0, "cn": 1, "code": 2, "ja": 3, "ar": 4, "kaoshi": 5} +import torch + +from internlm.core.context import global_context as gpc + +DATASET_TYPE_IDS_MAP = {"en": 0, "cn": 1, "code": 2} def get_dataset_type_id(path): @@ -13,3 +17,30 @@ def get_dataset_type_id(path): match_idxes.append(idx) assert len(match_idxes) == 1, f"{path}, match_idxes should be 1, but got {match_idxes} from {DATASET_TYPE_IDS_MAP}" return match_idxes[0] + + +def unpack_data(input_ids, cu_seqlens): + """ + input_ids: (n, packed_length) + Return: + output: (batch_size, max_length) + """ + + bsz = input_ids.shape[0] + + num_sequence = gpc.config.data["micro_bsz"] + + outputs = torch.zeros(bsz, num_sequence, gpc.config.data.seq_len, device=input_ids.device, dtype=input_ids.dtype) + + for i in range(bsz): + output = torch.zeros(num_sequence, gpc.config.data.seq_len, device=input_ids.device, dtype=input_ids.dtype) + cu_seqlens_slice = cu_seqlens[i] + for j in range(num_sequence): + seq_length = cu_seqlens_slice[j + 1] - cu_seqlens_slice[j] + output[j, 0:seq_length] = input_ids[0, cu_seqlens_slice[j] : cu_seqlens_slice[j + 1]] + outputs[i] = output + + if bsz == 1: + outputs = outputs.squeeze(0) + + return outputs diff --git a/internlm/initialize/__init__.py b/internlm/initialize/__init__.py index e8aef0b..ae94e0a 100644 --- a/internlm/initialize/__init__.py +++ b/internlm/initialize/__init__.py @@ -1,9 +1,15 @@ from .initialize_trainer import initialize_trainer -from .launch import get_default_parser, launch_from_slurm, launch_from_torch +from .launch import ( + get_default_parser, + initialize_distributed_env, + launch_from_slurm, + launch_from_torch, +) __all__ = [ "get_default_parser", "initialize_trainer", "launch_from_slurm", "launch_from_torch", + "initialize_distributed_env", ] diff --git a/internlm/initialize/initialize_trainer.py b/internlm/initialize/initialize_trainer.py index 2ed22c5..b8b7179 100644 --- a/internlm/initialize/initialize_trainer.py +++ b/internlm/initialize/initialize_trainer.py @@ -3,7 +3,7 @@ # adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/initialize -from typing import Callable, Iterable, Optional, Tuple +from typing import Callable, Iterable, List, Optional, Tuple from torch import nn from torch.nn.modules.loss import _Loss @@ -11,11 +11,19 @@ from torch.optim.lr_scheduler import _LRScheduler from torch.optim.optimizer import Optimizer from torch.utils.data import DataLoader +from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc from internlm.core.engine import Engine from internlm.core.gradient_handler import PipelineSharedModuleGradientHandler -from 
internlm.core.no_pipeline_scheduler import NonPipelineScheduler +from internlm.core.scheduler import ( + InterleavedPipelineScheduler, + NonPipelineScheduler, + PipelineScheduler, + SchedulerHook, +) +from internlm.core.scheduler.pipeline_scheduler import get_tensor_shape from internlm.core.trainer import Trainer +from internlm.data.utils import unpack_data from internlm.solver.beta2_scheduler import Beta2Scheduler from internlm.solver.optimizer.hybrid_zero_optim import BaseOptimizer from internlm.utils.common import get_current_device @@ -29,6 +37,7 @@ def initialize_trainer( test_dataloader: Optional[Iterable] = None, lr_scheduler: Optional[_LRScheduler] = None, beta2_scheduler: Optional[Beta2Scheduler] = None, + scheduler_hooks: Optional[List[SchedulerHook]] = None, ) -> Tuple[Trainer, DataLoader, DataLoader, _LRScheduler]: """Core function to wrap the essential training components with our functionality based on the config which is loaded into gpc.config. @@ -59,6 +68,8 @@ def initialize_trainer( assert isinstance(optimizer, BaseOptimizer), "optimizer must be instance of BaseOptimizer" # gradient handler, only support PipelineSharedModuleGradientHandler now + if gpc.is_using_pp(): + gpc.config.gradient_handler = [dict(type="PipelineSharedModuleGradientHandler")] gradient_handler_cfg = gpc.config.get("gradient_handler", []) gradient_handlers = [] assert isinstance(gradient_handler_cfg, list), f"gradient_handler must be list but got {type(gradient_handler_cfg)}" @@ -67,8 +78,50 @@ def initialize_trainer( handler = PipelineSharedModuleGradientHandler(model=model, optimizer=optimizer) gradient_handlers.append(handler) - scheduler = NonPipelineScheduler(gradient_accumulation_size=gpc.config.data.gradient_accumulation) + # initialize scheduler for trainer + scheduler = None + if gpc.config.model.use_flash_attn: + data_fn = None + else: + data_fn = unpack_data + if gpc.is_using_pp(): + gpc.config.NUM_MICRO_BATCHES = gpc.config.data.micro_num + tensor_shape = get_tensor_shape() + use_interleaved = ( + hasattr(gpc.config, "model") and hasattr(gpc.config.model, "num_chunks") and gpc.config.model.num_chunks > 1 + ) + scatter_gather = gpc.is_initialized(ParallelMode.TENSOR) + if use_interleaved: + if isinstance(model, nn.Sequential): + model = nn.ModuleList([model]) + communication_overlap = gpc.config.parallel["pipeline"].get("interleaved_overlap", False) + scheduler = InterleavedPipelineScheduler( + num_microbatches=gpc.config.NUM_MICRO_BATCHES, + num_chunks=gpc.config.model.num_chunks, + dtype=gpc.config.model["dtype"], + tensor_shape=tensor_shape, + scatter_gather_tensors=scatter_gather, + scheduler_hooks=scheduler_hooks, + communication_overlap=communication_overlap, + ) + else: + scheduler = PipelineScheduler( + data_process_func=data_fn, + num_microbatches=gpc.config.NUM_MICRO_BATCHES, + dtype=gpc.config.model["dtype"], + tensor_shape=tensor_shape, + scatter_gather_tensors=scatter_gather, + scheduler_hooks=scheduler_hooks, + ) + else: + scheduler = NonPipelineScheduler( + data_process_func=data_fn, + gradient_accumulation_size=gpc.config.data.gradient_accumulation, + scheduler_hooks=scheduler_hooks, + ) + + # initialize engine for trainer engine = Engine( model=model, optimizer=optimizer, diff --git a/internlm/initialize/launch.py b/internlm/initialize/launch.py index f203b2e..a69a506 100644 --- a/internlm/initialize/launch.py +++ b/internlm/initialize/launch.py @@ -10,7 +10,9 @@ import torch from internlm.core.context import Config from internlm.core.context import global_context as gpc 
+from internlm.utils.common import get_master_node from internlm.utils.logger import get_logger +from internlm.utils.storage_manager import init_storage_manager logger = get_logger(__file__) @@ -38,7 +40,7 @@ def get_default_parser(): parser.add_argument("--local_rank", type=int, help="local rank on the node") parser.add_argument("--backend", type=str, default="nccl", help="backend for distributed communication") parser.add_argument("--seed", type=int, default=1024) - parser.add_argument("--profiling", default=False, action="store_true", help="enable/diable profiling.") + parser.add_argument("--profiling", default=False, action="store_true", help="enable/disable profiling.") return parser @@ -89,6 +91,12 @@ def args_sanity_check(): if "valid_folder" not in data: data._add_item("valid_folder", None) + if "valid_micro_num" not in data: + data._add_item("valid_micro_num", data.micro_num) + + if "valid_every" not in data: + data._add_item("valid_every", 0) + if gpc.is_rank_for_log(): logger.info("+" * 15 + " Data Info " + "+" * 15) # pylint: disable=W1201 logger.info(f"seq_len: {data.seq_len}") @@ -97,36 +105,104 @@ def args_sanity_check(): logger.info(f"packed_length: {data.packed_length}") logger.info(f"pack_sample_into_one: {data.pack_sample_into_one}") logger.info(f"min_length: {data.min_length}") + logger.info(f"valid_micro_num: {data.valid_micro_num}") + logger.info(f"valid_every: {data.valid_every}") # processing the checkpoint config - if "checkpoint_every" not in gpc.config.ckpt or gpc.config.ckpt.checkpoint_every <= 0: - gpc.config.ckpt._add_item("checkpoint_every", float("inf")) + ckpt = gpc.config.ckpt + if "enable_save_ckpt" not in ckpt: + ckpt._add_item("enable_save_ckpt", False) - if "load_optimizer" not in gpc.config.ckpt: - gpc.config.ckpt._add_item("load_optimizer", True) + # Saving checkpoint args. + if ckpt.enable_save_ckpt: + assert "checkpoint_every" in ckpt, "If enable save checkpoint, must give checkpoint_every in config.data!" + assert ckpt.checkpoint_every > 0 + assert "save_ckpt_folder" in ckpt, "If enable save checkpoint, must give save_ckpt_folder in config.data!" - if "save_ckpt_folder" not in gpc.config.ckpt: - gpc.config.ckpt._add_item("save_ckpt_folder", None) + if "async_upload" not in ckpt: + ckpt._add_item("async_upload", False) # async defalut is False. + else: + if ckpt.async_upload: + assert "save_ckpt_folder" in ckpt + if "boto3:" not in ckpt.save_ckpt_folder: + if gpc.is_rank_for_log(): + logger.warning( + "Storing ckpt on file system does not support asynchronous storage, will use sync save!" + ) + ckpt.async_upload = False + else: + if "async_upload_tmp_folder" not in ckpt: + ckpt._add_item("async_upload_tmp_folder", "/dev/shm/internlm_tmp_ckpt/") - if "load_ckpt_folder" not in gpc.config.ckpt: - gpc.config.ckpt._add_item("load_ckpt_folder", None) + if not ckpt.async_upload: + ckpt._add_item("async_upload_tmp_folder", None) - if "load_model_only_folder" not in gpc.config.ckpt: - gpc.config.ckpt._add_item("load_model_only_folder", None) + if "snapshot_ckpt_folder" not in ckpt: + ckpt._add_item("snapshot_ckpt_folder", os.path.join(ckpt.save_ckpt_folder, "snapshot")) - assert not ( - gpc.config.ckpt.load_ckpt_folder is not None and gpc.config.ckpt.load_model_only_folder is not None - ), "'load_ckpt_folder' and 'load_model_only_folder' cannot be set at the same time." + if "oss_snapshot_freq" not in ckpt: + ckpt._add_item("oss_snapshot_freq", float("inf")) # if oss_snapshot_freq not given, we disable. 
+ else: + ckpt._add_item("checkpoint_every", float("inf")) + ckpt._add_item("oss_snapshot_freq", float("inf")) + ckpt._add_item("save_ckpt_folder", None) + ckpt._add_item("async_upload", False) + ckpt._add_item("async_upload_tmp_folder", None) + ckpt._add_item("snapshot_ckpt_folder", None) + ckpt._add_item("snapshot_ckpt_folder", None) - gpc.config.ckpt._add_item( - "enable_ckpt", gpc.config.ckpt.save_ckpt_folder is not None and gpc.config.ckpt.checkpoint_every > 0 - ) + # Loading checkpoint args. + if "load_model_only_folder" not in ckpt: + ckpt._add_item("load_model_only_folder", None) + + if "load_ckpt_folder" not in ckpt: + ckpt._add_item("load_ckpt_folder", None) + + if "load_optimizer" not in ckpt: + ckpt._add_item("load_optimizer", True) + + if "stop_file_path" not in ckpt: + ckpt._add_item("stop_file_path", None) + + if "load_given_ckpt" not in ckpt: + # If 'load_given_ckpt' is not given, we set it to False, so internlm can have opportunity + # to auto-load latest checkpoint. + ckpt._add_item("load_given_ckpt", False) + + if ckpt.load_given_ckpt: + # Priority: load_given_ckpt(True) > latest_checkpoint > load_model_only_folder + if ckpt.load_ckpt_folder and ckpt.load_model_only_folder: + logger.warning( + "Detect 'load_ckpt_folder' and 'load_model_only_folder' set at the same time, \ +and 'load_given_ckpt' is True, so internlm will load from 'load_ckpt_folder'" + ) + ckpt.load_model_only_folder = None if gpc.is_rank_for_log(): logger.info("+" * 15 + " Ckpt Info " + "+" * 15) # pylint: disable=W1201 - logger.info(f"is enable save ckpt: {gpc.config.ckpt.enable_ckpt}") - logger.info(f"save_ckpt_folder: {gpc.config.ckpt.save_ckpt_folder}") - logger.info(f"checkpoint_every: {gpc.config.ckpt.checkpoint_every}") + logger.info(f"is enable save ckpt: {ckpt.enable_save_ckpt}") + logger.info(f"save_ckpt_folder: {ckpt.save_ckpt_folder}") + logger.info(f"checkpoint_every: {ckpt.checkpoint_every}") + logger.info(f"load_given_ckpt: {ckpt.load_given_ckpt}") + + # initialization storage manager + init_storage_manager(ckpt) + + # tensorboard writer config + if "enable_tb" not in gpc.config: + gpc.config._add_item("enable_tb", True) + if "tensorboard_folder" not in gpc.config: + gpc.config._add_item( + "tensorboard_folder", os.environ["tensorboard_folder"] if "tensorboard_folder" in os.environ else None + ) + if "resume_tb_folder" not in gpc.config: + gpc.config._add_item( + "resume_tb_folder", os.environ["resume_tb_folder"] if "resume_tb_folder" in os.environ else None + ) + + if gpc.is_rank_for_log(): + logger.info(f"tensorboard_folder: {gpc.config.tensorboard_folder}") + logger.info(f"resume_tb_folder: {gpc.config.resume_tb_folder}") # cudnn torch.backends.cudnn.benchmark = gpc.config.get("cudnn_benchmark", False) @@ -144,12 +220,24 @@ def args_sanity_check(): logger.warning("dtype is not set, use torch.float16 by defalut!") model._add_item("dtype", torch.float16) else: - if model.dtype == "torch.bfloat16": - model.dtype = torch.bfloat16 - elif model.dtype in ("torch.float16", "torch.half"): - model.dtype = torch.float16 + if gpc.config.model.dtype == "torch.bfloat16": + gpc.config.model.dtype = torch.bfloat16 + elif gpc.config.model.dtype in ("torch.float16", "torch.half"): + gpc.config.model.dtype = torch.float16 + elif gpc.config.model.dtype == "torch.float32": + gpc.config.model.dtype = torch.float32 + elif gpc.config.model.dtype == "torch.tf32": + torch.backends.cudnn.allow_tf32 = True + torch.backends.cuda.matmul.allow_tf32 = True + gpc.config.model.dtype = torch.float32 else: - assert 
model.dtype in ["torch.float16", "torch.half", "torch.bfloat16"] + assert gpc.config.model.dtype in [ + "torch.float16", + "torch.half", + "torch.bfloat16", + "torch.float32", + "torch.tf32", + ] if "checkpoint" in model: if model.checkpoint is True: @@ -177,6 +265,35 @@ def args_sanity_check(): logger.info("+" * 15 + " beta2_scheduler Info " + "+" * 15) # pylint: disable=W1201 logger.info(f"beta2_scheduler: {gpc.config.beta2_scheduler}") + # process the model config + if "use_flash_attn" not in gpc.config.model: + gpc.config.model._add_item("use_flash_attn", True) + + # process the parallel config + if "sequence_parallel" not in gpc.config.parallel: + gpc.config.parallel._add_item("sequence_parallel", False) + else: + assert not ( + gpc.config.parallel.sequence_parallel is True and gpc.config.model.use_flash_attn is False + ), "sequence parallel does not support use_flash_attn=False" + + # feishu webhook address for alerting + if "alert_address" not in gpc.config: + gpc.config._add_item("alert_address", None) + + optim_ckpt = gpc.config.hybrid_zero_optimizer + if "zero_overlap_communication" in optim_ckpt: + # Compatible with the old interfaces. + optim_ckpt._add_item("overlap_sync_grad", optim_ckpt.zero_overlap_communication) + if "overlap_sync_grad" not in optim_ckpt: + optim_ckpt._add_item("overlap_sync_grad", False) + if "overlap_sync_param" not in optim_ckpt: + optim_ckpt._add_item("overlap_sync_param", False) + if gpc.is_rank_for_log(): + logger.info( + f"overlap_sync_grad:{optim_ckpt.overlap_sync_grad}, overlap_sync_param:{optim_ckpt.overlap_sync_param}" + ) + def launch( config: Union[str, Path, Config, Dict], @@ -223,8 +340,6 @@ def launch( # init process groups for different parallel modes from config gpc.init_parallel_groups() - args_sanity_check() - # set cuda device if torch.cuda.is_available(): # if local rank is not given, calculate automatically @@ -277,7 +392,11 @@ def launch_from_slurm( ) -def launch_from_torch(config: Union[str, Path, Config, Dict], backend: str = "nccl", seed: int = 1024): +def launch_from_torch( + config: Union[str, Path, Config, Dict], + backend: str = "nccl", + seed: int = 1024, +): """A wrapper for internlm.launch for torchrun or torch.distributed.launch by reading rank and world size from the environment variables set by PyTorch @@ -305,3 +424,38 @@ def launch_from_torch(config: Union[str, Path, Config, Dict], backend: str = "nc backend=backend, seed=seed, ) + + +def initialize_distributed_env( + config: str, + launcher: str = "slurm", + master_port: int = 8888, + seed: int = 1024, + args_check=True, +): + """ + Initialize distributed environment for distributed training. + + Args: + config (str): Config file path. + launcher (str): Launcher for launching distributed environment, can be slurm or torch. "slurm" by default. + master_port (str): The master port for distributed training. 8888 by default. + seed (int, optional): Specified random seed for every process. 1024 by default. 
+ """ + + torch.cuda.empty_cache() + + if launcher == "torch": + launch_from_torch(config=config, seed=seed) + elif launcher == "slurm": + launch_from_slurm( + config=config, + host=get_master_node(), + port=master_port, + seed=seed, + ) + else: + assert launcher in ["slurm", "torch"], "launcher only support slurm or torch" + + if args_check: + args_sanity_check() diff --git a/internlm/model/__init__.py b/internlm/model/__init__.py index c8745ce..b0fe77d 100644 --- a/internlm/model/__init__.py +++ b/internlm/model/__init__.py @@ -3,6 +3,7 @@ from .embedding import Embedding1D, RotaryEmbedding from .linear import FeedForward, RewardModelLinear, ScaleColumnParallelLinear +from .metrics import AccPerplex from .modeling_internlm import build_model_with_cfg from .multi_head_attention import MHA from .utils import gather_forward_split_backward @@ -13,6 +14,7 @@ __all__ = [ "RotaryEmbedding", "RewardModelLinear", "ScaleColumnParallelLinear", + "AccPerplex", "MHA", "gather_forward_split_backward", "build_model_with_cfg", diff --git a/internlm/model/embedding.py b/internlm/model/embedding.py index 43e8564..8c59aaf 100644 --- a/internlm/model/embedding.py +++ b/internlm/model/embedding.py @@ -7,13 +7,14 @@ import rotary_emb import torch import torch.nn.functional as F from einops import rearrange +from flash_attn.layers.rotary import ApplyRotaryEmb as LegacyApplyRotaryEmb from flash_attn.layers.rotary import ApplyRotaryEmbQKV_ as LegacyApplyRotaryEmbQKV_ from torch import Tensor, nn from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc -from .utils import gather_forward_split_backward +from .utils import gather_forward_split_backward, split_forward_gather_backward class Embedding1D(nn.Module): @@ -56,6 +57,9 @@ class Embedding1D(nn.Module): output = gather_forward_split_backward(output_parallel, ParallelMode.TENSOR, dim=-1) + if gpc.config.parallel.sequence_parallel: + output = split_forward_gather_backward(output, ParallelMode.TENSOR, dim=1) + return output @@ -108,6 +112,7 @@ class ApplyRotaryEmbQKV_(torch.autograd.Function): apply_rotary_emb_qkv_ = ApplyRotaryEmbQKV_.apply legacy_apply_rotary_embed_qkv = LegacyApplyRotaryEmbQKV_.apply +legacy_apply_rotary_embed = LegacyApplyRotaryEmb.apply class RotaryEmbedding(torch.nn.Module): @@ -176,7 +181,15 @@ class RotaryEmbedding(torch.nn.Module): self._cos_k_cached = (torch.cos(freqs) / scale).to(x.dtype) self._sin_k_cached = (torch.sin(freqs) / scale).to(x.dtype) - def forward(self, qkv: torch.Tensor, indexes=0) -> Tuple[torch.Tensor, torch.Tensor]: + def forward(self, qkv: torch.Tensor, **kwargs): + if kwargs.get("indexes", None) is not None: + return self._forward(qkv, kwargs.pop("indexes")) + if kwargs.get("inference_params", None) is not None: + return self._eval_forward(qkv, seqlen_offset=kwargs.get("inference_params", None).sequence_len_offset) + else: + return self._eval_forward(qkv) + + def _forward(self, qkv: torch.Tensor, indexes=0) -> Tuple[torch.Tensor, torch.Tensor]: self._update_cos_sin_cache(qkv, indexes) if self.scale is None: return apply_rotary_emb_qkv_(qkv, self._cos_cached[indexes], self._sin_cached[indexes]) @@ -189,7 +202,7 @@ class RotaryEmbedding(torch.nn.Module): self._sin_k_cached[indexes], ) - def eval_forward(self, qkv, seqlen_offset=0): + def _eval_forward(self, qkv, seqlen_offset=0): """ seqlen_offset: can be used in generation where the qkv being passed in is only the last token in the batch. 
diff --git a/internlm/model/linear.py b/internlm/model/linear.py index 88129af..32f29f8 100644 --- a/internlm/model/linear.py +++ b/internlm/model/linear.py @@ -5,15 +5,13 @@ from typing import Optional import torch import torch.nn.functional as F -from flash_attn.ops.fused_dense import ( - ColumnParallelLinear, - RowParallelLinear, - fused_dense_func, -) +from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear +from flash_attn.utils.distributed import all_reduce, reduce_scatter from torch import nn from internlm.core.context import IS_TENSOR_PARALLEL, ParallelMode from internlm.core.context import global_context as gpc +from internlm.model.utils import fused_dense_func_torch class ScaleColumnParallelLinear(nn.Linear): @@ -40,7 +38,6 @@ class ScaleColumnParallelLinear(nn.Linear): out_features: int, process_group: Optional[torch.distributed.ProcessGroup], bias: bool = True, - sequence_parallel: bool = True, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, weight_scale: int = 1, @@ -50,7 +47,6 @@ class ScaleColumnParallelLinear(nn.Linear): raise ValueError(f"out_features ({out_features}) must be divisible by " f"world_size ({world_size})") super().__init__(in_features, out_features // world_size, bias=bias, device=device, dtype=dtype) self.process_group = process_group - self.sequence_parallel = sequence_parallel self.weight_scale = weight_scale def forward(self, input): # pylint: disable=W0622 @@ -61,8 +57,12 @@ class ScaleColumnParallelLinear(nn.Linear): weight = self.weight * self.weight_scale + (1 - self.weight_scale) * self.weight.detach() else: weight = self.weight - return fused_dense_func( - input, weight, self.bias, process_group=self.process_group, sequence_parallel=self.sequence_parallel + return fused_dense_func_torch( + input, + weight, + self.bias, + process_group=self.process_group, + sequence_parallel=gpc.config.parallel.sequence_parallel, ) @@ -89,12 +89,11 @@ class RewardModelLinear(ScaleColumnParallelLinear): out_features: int, process_group: Optional[torch.distributed.ProcessGroup], bias: bool = True, - sequence_parallel: bool = True, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, weight_scale: int = 1, ) -> None: - super().__init__(in_features, out_features, process_group, bias, sequence_parallel, device, dtype, weight_scale) + super().__init__(in_features, out_features, process_group, bias, device, dtype, weight_scale) torch.distributed.broadcast(self.weight, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], process_group) if bias: torch.distributed.broadcast(self.bias, gpc.get_ranks_in_group(ParallelMode.TENSOR)[0], process_group) @@ -107,11 +106,37 @@ class RewardModelLinear(ScaleColumnParallelLinear): weight = self.weight * self.weight_scale + (1 - self.weight_scale) * self.weight.detach() else: weight = self.weight - return fused_dense_func( - input, weight, self.bias, process_group=self.process_group, sequence_parallel=self.sequence_parallel + return fused_dense_func_torch( + input, + weight, + self.bias, + process_group=self.process_group, + sequence_parallel=gpc.config.parallel.sequence_parallel, ) +class ColumnParallelLinearTorch(ColumnParallelLinear): + def forward(self, x): + # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism: + # we do an all_gather of x before doing the matmul. + # If not, then the input is already gathered. 
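+        # Under sequence parallelism, activations stay sharded along the sequence
+        # dimension between layers, which is why the input may arrive un-gathered here.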
+ + return fused_dense_func_torch( + x, self.weight, self.bias, process_group=self.process_group, sequence_parallel=self.sequence_parallel + ) + + +class RowParallelLinearTorch(RowParallelLinear): + def forward(self, x): + """ + We're doing Tensor Parallel with sequence parallelism: we do the matmul and then + a reduce_scatter of the result. + """ + out = fused_dense_func_torch(x, self.weight, self.bias) + reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce + return reduce_fn(out, self.process_group) + + class FeedForward(nn.Module): """ FeedForward. @@ -143,24 +168,30 @@ class FeedForward(nn.Module): hidden_features = multiple_of * ((hidden_features + multiple_of - 1) // multiple_of) - self.w1 = ColumnParallelLinear( + self.w1 = ColumnParallelLinearTorch( in_features, hidden_features, process_group, bias, - sequence_parallel=False, + sequence_parallel=gpc.config.parallel.sequence_parallel, device=device, dtype=dtype, ) - self.w2 = ColumnParallelLinear( - in_features, hidden_features, process_group, bias, sequence_parallel=False, device=device, dtype=dtype + self.w2 = ColumnParallelLinearTorch( + in_features, + hidden_features, + process_group, + bias, + sequence_parallel=gpc.config.parallel.sequence_parallel, + device=device, + dtype=dtype, ) - self.w3 = RowParallelLinear( + self.w3 = RowParallelLinearTorch( hidden_features, out_features, process_group, bias=bias, - sequence_parallel=False, + sequence_parallel=gpc.config.parallel.sequence_parallel, device=device, dtype=dtype, ) diff --git a/internlm/model/metrics.py b/internlm/model/metrics.py new file mode 100644 index 0000000..1749aa2 --- /dev/null +++ b/internlm/model/metrics.py @@ -0,0 +1,263 @@ +from typing import List + +import torch +from flash_attn.losses.cross_entropy import CrossEntropyLoss as FlashCrossEntropyLoss +from torch_scatter import scatter + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.utils.parallel import is_no_pp_or_last_stage + + +class AccPerplex: + """ + AccPerplex module for calculating model's accuracy and perplexity metrics. + + Args: + device: The GPU device. + tp_pg: The tensor parallel process group. + dp_pg: The data parallel process group. + tokenizer: For calculating BPB. + dataset_types (List[str]): Various data types that will be used in the current training process, + such as ['en', 'cn', 'code']. The order of the List should be consistent with the type_id specified + in the dataset. Changed parameters need to be used in conjunction with set_current_type_ids(). 
+ """ + + def __init__(self, device, tp_pg, dp_pg, tokenizer=None, dataset_types: List[str] = None): + self.device = device + self.right = torch.Tensor([0]).to(device=device) + self.total = torch.Tensor([0]).to(device=device) + self.total_log_probs = torch.Tensor([0]).to(device=device) + self.tp_pg = tp_pg + self.dp_pg = dp_pg + self.tp_local_rank = torch.distributed.get_rank(self.tp_pg) + self.tokenizer = tokenizer + self.total_bytes = torch.Tensor([0]).to(device=device).view(1) + self.batch_shift = 0 + self.type_ids = None + if dataset_types is not None: + self.dataset_types = dataset_types + self.total_type_count = len(dataset_types) + self.ds_right = torch.zeros(self.total_type_count, dtype=torch.long, device=device) + self.ds_tokens = torch.zeros(self.total_type_count, dtype=torch.long, device=device) + + self.loss_with_type_id = LossWithTypeId(device, dp_pg, dataset_types) + + def set_current_type_ids(self, type_ids: torch.Tensor): + self.batch_shift = 0 + self.type_ids = type_ids.cuda() + + def __call__(self, logits, labels): + return self.update(logits, labels, type_ids=self.type_ids) + + def update(self, logits, labels, type_ids=None): + if gpc.config.model.use_flash_attn: + micro_bsz = labels.size(0) + else: + micro_bsz = 1 + if type_ids is not None: + type_ids = type_ids[self.batch_shift * micro_bsz : (self.batch_shift + 1) * micro_bsz].view(-1) + self.batch_shift += 1 + self.loss_with_type_id.update(logits, labels, type_ids) + + with torch.no_grad(): + if isinstance(logits, (list, tuple)): + logits = logits[0] + + logits = logits.detach().clone() + labels = labels.detach().clone() + + if self.tokenizer: # need to calculate bits per bytes + sequences = self.tokenizer.decode_ids(labels.tolist()) + self.total_bytes += sum(map(lambda x: len(x.encode("utf-8")), sequences)) + + shift_logits = logits.view(-1, logits.size(-1)) + shift_labels = labels.view(-1) + # There is a shift according to the current rank, because the logits are split + pred_shift = self.tp_local_rank * logits.shape[-1] + + logits_max = torch.max(shift_logits, dim=-1)[0] + torch.distributed.all_reduce(logits_max, op=torch.distributed.ReduceOp.MAX, group=self.tp_pg) + # Determine whether the maximum value of the current local tensor is the global maximum value + logits_global = logits_max == torch.max(shift_logits, dim=-1)[0] + + corrects = torch.logical_and( + (shift_labels == (shift_logits.argmax(dim=-1) + pred_shift)), logits_global + ).long() + mask = shift_labels.ne(-100).long() + if hasattr(self, "total_type_count"): + ds_acc = scatter(corrects, type_ids, dim=0, reduce="sum") + token_num_type = scatter(mask, type_ids, dim=0, reduce="sum") + if len(ds_acc) < self.total_type_count: + ds_acc = torch.cat([ds_acc, ds_acc.new_zeros(self.total_type_count - len(ds_acc))]) + token_num_type = torch.cat( + [token_num_type, token_num_type.new_zeros(self.total_type_count - len(token_num_type))] + ) + self.ds_tokens += token_num_type + sync_tensor = ds_acc + torch.distributed.all_reduce(sync_tensor, op=torch.distributed.ReduceOp.SUM, group=self.tp_pg) + self.ds_right += sync_tensor.view(-1) + + acc = corrects.sum() + torch.distributed.all_reduce(acc, op=torch.distributed.ReduceOp.SUM, group=self.tp_pg) + self.right += acc # Masked_fill is not needed here because -100 is not available anyway + self.total += mask.sum() + + # Subtract the maximum value. 
+ shift_logits = shift_logits.sub(logits_max.unsqueeze(dim=-1)) + + # Get the partition's vocab indecies + partition_vocab_size = shift_logits.size()[-1] + vocab_start_index = partition_vocab_size * self.tp_local_rank + vocab_end_index = vocab_start_index + partition_vocab_size + + # Create a mask of valid vocab ids (1 means it needs to be masked). + target_mask = (shift_labels < vocab_start_index) | (shift_labels >= vocab_end_index) + masked_target = shift_labels - vocab_start_index + masked_target[target_mask] = 0 + + # Get predicted-logits = logits[target]. + # For Simplicity, we convert logits to a 2-D tensor with size + # [*, partition-vocab-size] and target to a 1-D tensor of size [*]. + logits_2d = shift_logits.view(-1, partition_vocab_size) + masked_target_1d = masked_target.view(-1) + arange_1d = torch.arange(start=0, end=logits_2d.size()[0], device=logits_2d.device) + predicted_logits_1d = logits_2d[arange_1d, masked_target_1d] + predicted_logits_1d = predicted_logits_1d.clone().contiguous() + predicted_logits = predicted_logits_1d.view_as(shift_labels) # bsz x max_len + predicted_logits[target_mask] = 0.0 + # All reduce is needed to get the chunks from other GPUs. + torch.distributed.all_reduce(predicted_logits, op=torch.distributed.ReduceOp.SUM, group=self.tp_pg) + + pred_exp_logits = torch.exp(predicted_logits) + # Sum of exponential of logits along vocab dimension across all GPUs. + sum_exp_logits = torch.exp(shift_logits).sum(dim=-1) + torch.distributed.all_reduce(sum_exp_logits, op=torch.distributed.ReduceOp.SUM, group=self.tp_pg) + + total_log_probs = -(pred_exp_logits / sum_exp_logits).log().masked_fill(shift_labels.eq(-100), 0).sum() + self.total_log_probs += total_log_probs + + def get_metric(self, reset=True): + if is_no_pp_or_last_stage() and self.dp_pg is not None: + torch.distributed.all_reduce(self.right, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + torch.distributed.all_reduce(self.total, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + torch.distributed.all_reduce(self.total_log_probs, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + if hasattr(self, "total_type_count"): + torch.distributed.all_reduce(self.ds_right, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + torch.distributed.all_reduce(self.ds_tokens, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + if self.tokenizer: + torch.distributed.all_reduce(self.total_bytes, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + + acc = round((self.right / self.total).item(), 4) + perplexity = round(torch.exp(self.total_log_probs / self.total).item(), 4) + bits_per_bytes = round((self.total_log_probs / self.total_bytes).item(), 4) if self.tokenizer else 0 + + if hasattr(self, "total_type_count"): + ds_acc = {} + ds_tokens = {} + for i in range(self.total_type_count): + ds_acc[f"acc/{self.dataset_types[i]}"] = round( + (self.ds_right[i].float() / (self.ds_tokens[i].float() + 1e-5)).item(), 4 + ) + ds_tokens[f"tokens/{self.dataset_types[i]}"] = self.ds_tokens[i].item() + if reset: + self.right.fill_(0) + self.total.fill_(0) + self.total_log_probs.fill_(0) + self.total_bytes.fill_(0) + if hasattr(self, "total_type_count"): + self.ds_right.fill_(0) + self.ds_tokens.fill_(0) + if self.tokenizer is not None: + res = {"acc": acc, "perplexity": perplexity, "BPB": bits_per_bytes} + else: + res = {"acc": acc, "perplexity": perplexity} + if hasattr(self, "total_type_count"): + res.update(ds_acc) + res.update(ds_tokens) + + loss_res = self.loss_with_type_id.get_metric() + res.update(loss_res) 
+ + return res + + +class LossWithTypeId: + """ + Notice the loss value computed here may be not the same with the main info loss, + cause loss here is the reduced result of the data parallel. + """ + + def __init__(self, device, dp_pg, dataset_types: List[str] = None) -> None: + self.device = device + self.dp_pg = dp_pg + + self.loss = torch.Tensor([0.0]).to(device=device) + self.token_num = torch.Tensor([0.0]).to(device=device) + + if dataset_types is not None: + self.dataset_types = dataset_types + self.total_type_count = len(dataset_types) + self.ds_loss = torch.zeros(self.total_type_count, dtype=torch.float, device=device) + self.ds_token_num = torch.zeros(self.total_type_count, dtype=torch.float, device=device) + + self.loss_fn = FlashCrossEntropyLoss( + reduction="none", inplace_backward=True, process_group=gpc.get_group(ParallelMode.TENSOR) + ) + + def update(self, logits, labels, type_ids=None): + with torch.no_grad(): + if isinstance(logits, (list, tuple)): + logits = logits[0] + logits = logits.contiguous().view(-1, logits.size(-1)) + labels = labels.contiguous().view(-1) + loss_list = self.loss_fn(logits, labels) + + cond = labels != -100 + real_loss_list = loss_list[cond] + self.loss += real_loss_list.sum() + self.token_num += real_loss_list.numel() + + if hasattr(self, "total_type_count"): + type_ids = type_ids.contiguous().view(-1).to(self.device) + real_type_ids = type_ids[cond] + + loss_list_type = scatter(real_loss_list, real_type_ids, dim=0, reduce="sum") + token_num_type = scatter(torch.ones_like(real_loss_list), real_type_ids, dim=0, reduce="sum") + + if len(loss_list_type) < self.total_type_count: + loss_list_type = torch.cat( + [loss_list_type, loss_list_type.new_zeros(self.total_type_count - len(loss_list_type))] + ) + token_num_type = torch.cat( + [token_num_type, token_num_type.new_zeros(self.total_type_count - len(token_num_type))] + ) + self.ds_loss += loss_list_type + self.ds_token_num += token_num_type + + def get_metric(self, reset=True): + if is_no_pp_or_last_stage() and self.dp_pg is not None: + torch.distributed.all_reduce(self.loss, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + torch.distributed.all_reduce(self.token_num, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + if hasattr(self, "total_type_count"): + torch.distributed.all_reduce(self.ds_loss, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + torch.distributed.all_reduce(self.ds_token_num, op=torch.distributed.ReduceOp.SUM, group=self.dp_pg) + + loss = round((self.loss / self.token_num).item(), 4) + res = { + "loss_from_metric": loss, + } + if hasattr(self, "total_type_count"): + ds_loss = {} + for i in range(self.total_type_count): + ds_loss[f"loss/{self.dataset_types[i]}"] = round((self.ds_loss[i] / self.ds_token_num[i]).item(), 4) + res.update(ds_loss) + + if reset: + self.loss.fill_(0.0) + self.token_num.fill_(0.0) + if hasattr(self, "total_type_count"): + self.ds_loss.fill_(0.0) + self.ds_token_num.fill_(0.0) + + return res diff --git a/internlm/model/modeling_internlm.py b/internlm/model/modeling_internlm.py index 666ab52..4494959 100644 --- a/internlm/model/modeling_internlm.py +++ b/internlm/model/modeling_internlm.py @@ -5,7 +5,6 @@ import math from typing import Optional import torch -from apex.normalization.fused_layer_norm import MixedFusedRMSNorm as RMSNorm from flash_attn.modules.embedding import ParallelGPT2Embeddings from flash_attn.modules.mlp import ParallelFusedMLP from torch import nn @@ -20,7 +19,7 @@ from internlm.model.linear import ( 
ScaleColumnParallelLinear, ) from internlm.model.multi_head_attention import MHA -from internlm.model.utils import gather_forward_split_backward +from internlm.model.utils import gather_forward_split_backward, try_import_RMSNorm from internlm.solver.pipeline_utils import partition_uniform from internlm.utils.checkpoint import activation_checkpoint from internlm.utils.common import filter_kwargs @@ -30,6 +29,7 @@ from internlm.utils.registry import MODEL_INITIALIZER MODEL_TYPE = "INTERNLM" logger = get_logger(__file__) +RMSNorm = try_import_RMSNorm() class PackedFlashBaseLayer1D(nn.Module): @@ -49,6 +49,7 @@ class PackedFlashBaseLayer1D(nn.Module): residual_in_fp32 (bool): Whether to use residual in fp32. False by default. device (Optional[Union[str, torch.device]]): The device will be used. norm_type (str): Use RMS norm or layernorm."rmsnorm" by default. + use_flash_attn (bool): Whether use flash-attn. True by default. """ def __init__( @@ -68,12 +69,14 @@ class PackedFlashBaseLayer1D(nn.Module): dropout_selective_checkpoint: bool = True, use_scaled_init: bool = True, use_swiglu: bool = True, + use_flash_attn: bool = True, ): super().__init__() self.checkpoint = checkpoint # dropout selective checkpoint can only be enabled when checkpoint is disabled. self.dropout_selective_checkpoint = dropout_selective_checkpoint is True and checkpoint is False self.layer_idx = layer_idx + self.use_flash_attn = use_flash_attn head_dim = hidden_size // num_attention_heads self.mixer = MHA( @@ -86,8 +89,7 @@ class PackedFlashBaseLayer1D(nn.Module): layer_idx=layer_idx, rotary_emb_dim=head_dim, rotary_emb_scale_base=0, - use_flash_attn=True, - sequence_parallel=False, + use_flash_attn=use_flash_attn, device=device, dtype=dtype, ) @@ -119,7 +121,7 @@ class PackedFlashBaseLayer1D(nn.Module): process_group=gpc.get_group(ParallelMode.TENSOR), bias1=False, bias2=False, - sequence_parallel=False, + sequence_parallel=gpc.config.parallel.sequence_parallel, checkpoint_lvl=0, heuristic="auto", device=device, @@ -243,6 +245,7 @@ class PackedFlashInternLm1D(nn.Module): device (Optional[Union[str, torch.device]]): The device will be used. None by default. residual_in_fp32 (bool): Whether to use residual in fp32. False by default. norm_type (str): Normalization type. Use RMSNorm or LayerNorm. "rmsnorm" by default. + use_flash_attn (bool): Whether to use flash-attn. True by default. 
""" @@ -271,6 +274,7 @@ class PackedFlashInternLm1D(nn.Module): dropout_selective_checkpoint: bool = True, use_scaled_init: bool = True, use_swiglu: bool = True, + use_flash_attn: bool = True, ): super().__init__() @@ -290,7 +294,7 @@ class PackedFlashInternLm1D(nn.Module): max_position_embeddings=-1, process_group=gpc.get_group(ParallelMode.TENSOR), padding_idx=None, - sequence_parallel=False, + sequence_parallel=gpc.config.parallel.sequence_parallel, device=device, dtype=dtype, ) @@ -317,6 +321,7 @@ class PackedFlashInternLm1D(nn.Module): dropout_selective_checkpoint=dropout_selective_checkpoint, use_scaled_init=use_scaled_init, use_swiglu=use_swiglu, + use_flash_attn=use_flash_attn, ) for lid in range(num_layers) ] @@ -331,7 +336,6 @@ class PackedFlashInternLm1D(nn.Module): out_features=gpc.get_world_size(ParallelMode.TENSOR) if is_reward else vocab_size, process_group=gpc.get_group(ParallelMode.TENSOR), bias=False, - sequence_parallel=False, device=device, dtype=dtype, weight_scale=embed_grad_scale, @@ -397,9 +401,10 @@ def _build_generic_model_1d(num_layers, num_chunks, device=torch.device("cuda"), pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE) pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE) - # all_parts = partition_uniform_with_embed2(num_layers, pipeline_size, num_chunks) all_parts = partition_uniform(num_layers, pipeline_size, num_chunks) parts = all_parts[pipeline_rank] + if gpc.is_rank_for_log(): + logger.info(f"The layer sharding is {all_parts}.") models = [] @@ -445,6 +450,8 @@ def build_model_with_cfg( dropout_selective_checkpoint=True, use_scaled_init: bool = True, use_swiglu: bool = True, + use_flash_attn: bool = True, + sequence_parallel: bool = False, # pylint: disable=W0613 ): """ Builde model with config @@ -474,6 +481,7 @@ def build_model_with_cfg( dropout_selective_checkpoint (bool): It can only be enabled when checkpoint is disabled. True by default. use_scaled_init (bool): Whether to use scaled init. True by default. use_swiglu (bool): Whether to use swiglu. True by default. + use_flash_attn (bool): Whether to use flash-attn. True by default. """ @@ -496,6 +504,7 @@ def build_model_with_cfg( dropout_selective_checkpoint=dropout_selective_checkpoint, use_scaled_init=use_scaled_init, use_swiglu=use_swiglu, + use_flash_attn=use_flash_attn, ) return _build_generic_model_1d(num_layers=num_layers, num_chunks=num_chunks, **cfg) diff --git a/internlm/model/multi_head_attention.py b/internlm/model/multi_head_attention.py index f9e14d7..d634605 100644 --- a/internlm/model/multi_head_attention.py +++ b/internlm/model/multi_head_attention.py @@ -12,12 +12,12 @@ from flash_attn.modules.mha import ( SelfAttention, _update_kv_cache, ) -from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear from torch import nn from internlm.core.context import IS_TENSOR_PARALLEL, ParallelMode from internlm.core.context import global_context as gpc from internlm.model.embedding import RotaryEmbedding +from internlm.model.linear import ColumnParallelLinearTorch, RowParallelLinearTorch class MHA(nn.Module): @@ -43,6 +43,7 @@ class MHA(nn.Module): of x will be done before doing the matmul. device (Optional[Union[str, torch.device]]): The device will be used. dtype (Optional[torch.dtype]): The type of data. + use_flash_attn (bool): Whether to use flash-attn. True by default. 
""" @@ -57,8 +58,7 @@ class MHA(nn.Module): layer_idx: int = None, rotary_emb_dim: int = 0, rotary_emb_scale_base: int = 0, - use_flash_attn: bool = False, - sequence_parallel: bool = True, + use_flash_attn: bool = True, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None, ) -> None: @@ -77,12 +77,12 @@ class MHA(nn.Module): self.rotary_emb = RotaryEmbedding(self.rotary_emb_dim, scale_base=rotary_emb_scale_base, device=device) # notice here should change bias=True - self.Wqkv = ColumnParallelLinear( + self.Wqkv = ColumnParallelLinearTorch( embed_dim, 3 * embed_dim, process_group, bias=True, - sequence_parallel=sequence_parallel, + sequence_parallel=gpc.config.parallel.sequence_parallel, **factory_kwargs, ) # according to https://spaces.ac.cn/archives/9577 @@ -94,8 +94,12 @@ class MHA(nn.Module): ) # output projection always have the bias (for now) - self.out_proj = RowParallelLinear( - embed_dim, embed_dim, process_group, sequence_parallel=sequence_parallel, **factory_kwargs + self.out_proj = RowParallelLinearTorch( + embed_dim, + embed_dim, + process_group, + sequence_parallel=gpc.config.parallel.sequence_parallel, + **factory_kwargs, ) # need to assign tp attribute so that internlm know it is tensor parallel module if gpc.get_world_size(ParallelMode.TENSOR) > 1: @@ -107,9 +111,9 @@ class MHA(nn.Module): if kwargs.get("indexes", None) is not None: return self._packed_forward(x=x, inference_params=inference_params, **kwargs) else: - return self._forward(x=x, seqlen=seqlen, inference_params=inference_params) + return self._forward(x=x, seqlen=seqlen, inference_params=inference_params, **kwargs) - def _forward(self, x, seqlen=None, inference_params=None): + def _forward(self, x, seqlen=None, inference_params=None, **kwargs): """ Arguments: x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if seqlen=None. 
@@ -124,13 +128,17 @@ class MHA(nn.Module): qkv = rearrange(qkv, "(b s) (three h d) -> b s three h d", s=seqlen, three=3, d=self.head_dim) if self.rotary_emb_dim > 0: - if inference_params is None: - qkv = self.rotary_emb.eval_forward(qkv) - else: - qkv = self.rotary_emb.eval_forward(qkv, seqlen_offset=inference_params.sequence_len_offset) + kwargs["inference_params"] = inference_params + qkv = self.rotary_emb(qkv, **kwargs) if inference_params is None: - context = self.inner_attn(qkv) + if gpc.config.model.dtype is torch.float32 and gpc.config.model.use_flash_attn: + with torch.cuda.amp.autocast(dtype=torch.bfloat16): + if qkv.dtype not in [torch.float16, torch.bfloat16]: + qkv = qkv.to(torch.bfloat16) + context = self.inner_attn(qkv).to(x.dtype) + else: + context = self.inner_attn(qkv) else: q = qkv[:, :, 0] assert self.layer_idx is not None, "Generation requires layer_idx in the constructor" @@ -158,10 +166,18 @@ class MHA(nn.Module): """ qkv = self.Wqkv(x) # total x hsz' qkv = rearrange(qkv, "t (three h d) -> t three h d", three=3, d=self.head_dim) # total x 3 x n_head x d - qkv = self.rotary_emb(qkv, kwargs.pop("indexes")) + qkv = self.rotary_emb(qkv, **kwargs) + kwargs.pop("indexes") if inference_params is None: - context = self.inner_attn(qkv, **kwargs) + if gpc.config.model.dtype is torch.float32 and gpc.config.model.use_flash_attn: + with torch.cuda.amp.autocast(dtype=torch.bfloat16): + if qkv.dtype not in [torch.float16, torch.bfloat16]: + qkv = qkv.to(torch.bfloat16) + context = self.inner_attn(qkv, **kwargs).to(x.dtype) + else: + context = self.inner_attn(qkv, **kwargs) + else: raise RuntimeError("Not support this right now") diff --git a/internlm/model/norm.py b/internlm/model/norm.py new file mode 100644 index 0000000..6598e17 --- /dev/null +++ b/internlm/model/norm.py @@ -0,0 +1,46 @@ +# adopted from https://github.com/NVIDIA/apex/blob/master/apex/normalization/fused_layer_norm + +import numbers + +import torch +from torch.nn import init +from torch.nn.parameter import Parameter + + +def manual_rms_norm(my_input, normalized_shape, weight, eps): + # layer norm should always be calculated in float32 + dims = tuple(i for i in range(-1, -len(normalized_shape) - 1, -1)) + variance = my_input.to(torch.float32).pow(2).mean(dims, keepdim=True) + my_input = my_input * torch.rsqrt(variance + eps) + + if weight is None: + return my_input + + # convert into half-precision if necessary + if weight.dtype in [torch.float16, torch.bfloat16]: + my_input = my_input.to(weight.dtype) + + return weight * my_input + + +class RMSNormTorch(torch.nn.Module): + """A custom PyTorch module for RMS normalization.""" + + def __init__(self, normalized_shape, eps=1e-5): + super().__init__() + + if isinstance(normalized_shape, numbers.Integral): + normalized_shape = (normalized_shape,) + self.normalized_shape = torch.Size(normalized_shape) + self.eps = eps + self.weight = Parameter(torch.empty(*normalized_shape)) + self.reset_parameters() + + def forward(self, _input: torch.Tensor): + return manual_rms_norm(_input, self.normalized_shape, self.weight, self.eps) + + def reset_parameters(self): + init.ones_(self.weight) + + def extra_repr(self): + return "{normalized_shape}, eps={eps}, ".format(**self.__dict__) diff --git a/internlm/model/utils.py b/internlm/model/utils.py index b0d7264..12f80e3 100644 --- a/internlm/model/utils.py +++ b/internlm/model/utils.py @@ -1,9 +1,24 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- +from typing import Optional + import torch +import torch.nn.functional as F +from 
flash_attn.ops.fused_dense import FusedDenseFunc +from flash_attn.utils.distributed import ( + all_gather_raw, + all_reduce_raw, + reduce_scatter_raw, +) +from torch import Tensor +from torch.cuda.amp import custom_bwd +from torch.distributed import ProcessGroup from internlm.core.context import global_context as gpc +from internlm.utils.logger import get_logger + +logger = get_logger(__file__) def _split(input_, parallel_mode, dim=-1): @@ -71,3 +86,124 @@ class _GatherForwardSplitBackward(torch.autograd.Function): def gather_forward_split_backward(input_, parallel_mode, dim): return _GatherForwardSplitBackward.apply(input_, parallel_mode, dim) + + +def linear_bias_wgrad_torch(my_input, grad_output, has_d_bias): + assert my_input.dtype == grad_output.dtype + grad_weight = torch.matmul(grad_output.t(), my_input) + grad_bias = grad_output.sum(dim=0) if has_d_bias else None + return grad_weight, grad_bias + + +# adpated from https://github.com/Dao-AILab/flash-attention/blob/main/flash_attn/ops/fused_dense.py +class FusedDenseFuncTorch(FusedDenseFunc): + """A custom PyTorch module extending FusedDenseFunc.""" + + @staticmethod + @custom_bwd + def backward(ctx, grad_output, *args): + grad_output = grad_output.contiguous() + if ctx.return_residual: + (grad_input,) = args + grad_input = grad_input.contiguous() + process_group = ctx.process_group + sequence_parallel = ctx.sequence_parallel + if ctx.compute_weight_gradient: + x, weight = ctx.saved_tensors + if process_group is not None and sequence_parallel: + total_x, handle_x = all_gather_raw(x, process_group, async_op=True) + else: + total_x = x + else: + (weight,) = ctx.saved_tensors + total_x = None + batch_shape = grad_output.shape[:-1] + batch_dim = batch_shape.numel() + grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1]) + if ctx.needs_input_grad[0]: + if not ctx.return_residual: + grad_input = F.linear(grad_output, weight.t()) + else: + grad_input = torch.addmm(grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_output, weight) + grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1]) + if process_group is not None: + reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw + grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True) + else: + grad_input = None + if ctx.needs_input_grad[1]: + assert ctx.compute_weight_gradient + if process_group is not None and sequence_parallel: + handle_x.wait() + # we remove the cuda independence, which is different from flash_attn. 
+            grad_weight, grad_bias = linear_bias_wgrad_torch(
+                total_x.reshape(batch_dim, total_x.shape[-1]), grad_output, ctx.needs_input_grad[2]
+            )
+        else:
+            grad_weight = None
+            grad_bias = grad_output if ctx.needs_input_grad[2] else None
+        if process_group is not None and ctx.needs_input_grad[0]:
+            handle_grad_input.wait()
+        return grad_input, grad_weight, grad_bias, None, None, None
+
+
+def fused_dense_func_torch(
+    x: Tensor,
+    weight: Tensor,
+    bias: Optional[Tensor] = None,
+    return_residual: bool = False,
+    process_group: Optional[ProcessGroup] = None,
+    sequence_parallel: bool = True,
+):
+    dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
+        x.dtype == torch.float32 and torch.is_autocast_enabled()
+    )
+    if x.is_cuda and weight.is_cuda and (bias is None or bias.is_cuda) and dtype_eligible:
+        return FusedDenseFunc.apply(x, weight, bias, return_residual, process_group, sequence_parallel)
+    else:
+        return FusedDenseFuncTorch.apply(x, weight, bias, return_residual, process_group, sequence_parallel)
+
+
+class _SplitForwardGatherBackward(torch.autograd.Function):
+    """
+    Split the input and keep only the chunk corresponding to the rank.
+
+    Args:
+        input_: input matrix.
+        parallel_mode: parallel mode.
+        dim: dimension
+    """
+
+    @staticmethod
+    def symbolic(input_):
+        return _split(input_, parallel_mode=None)
+
+    @staticmethod
+    def forward(ctx, input_, parallel_mode, dim):
+        ctx.mode = parallel_mode
+        ctx.dim = dim
+        return _split(input_, parallel_mode, dim)
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        return _gather(grad_output, ctx.mode, ctx.dim), None, None
+
+
+def split_forward_gather_backward(input_, parallel_mode, dim):
+    return _SplitForwardGatherBackward.apply(input_, parallel_mode, dim)
+
+
+def try_import_RMSNorm():
+    """
+    Try to import MixedFusedRMSNorm from apex; if that fails, fall back to our RMSNorm.
+
+    """
+    try:
+        from apex.normalization.fused_layer_norm import MixedFusedRMSNorm as RMSNorm
+
+        return RMSNorm
+    except ModuleNotFoundError:
+        logger.warning("The torch implementation of RMSNorm is slower than apex's MixedFusedRMSNorm. Please note this!")
+        from internlm.model.norm import RMSNormTorch as RMSNorm
+
+        return RMSNorm
diff --git a/internlm/monitor/__init__.py b/internlm/monitor/__init__.py
new file mode 100644
index 0000000..b100cde
--- /dev/null
+++ b/internlm/monitor/__init__.py
@@ -0,0 +1,4 @@
+from .monitor import initialize_monitor_manager, send_alert_message
+from .utils import set_env_var
+
+__all__ = ["send_alert_message", "initialize_monitor_manager", "set_env_var"]
diff --git a/internlm/monitor/alert.py b/internlm/monitor/alert.py
new file mode 100644
index 0000000..78b6040
--- /dev/null
+++ b/internlm/monitor/alert.py
@@ -0,0 +1,53 @@
+import json
+import time
+
+import requests
+
+
+def send_feishu_msg_with_webhook(webhook: str, title: str, message: str):
+    """
+    Use the Feishu robot to send messages with the given webhook.
+
+    Args:
+        webhook (str): The webhook to be used to send the message.
+        title (str): The message title.
+        message (str): The message body.
+
+    Returns:
+        The response from the request, or None if an exception was caught.
+
+    Raises:
+        Exception: An exception raised by the HTTP post request.
+ + """ + + headers = {"Content-Type": "application/json;charset=utf-8"} + msg_body = { + "timestamp": int(time.time()), + "msg_type": "post", + "content": { + "post": { + "zh_cn": { + "title": title, + "content": [ + [ + { + "tag": "text", + "text": message, + }, + ], + ], + }, + }, + }, + } + + try: + res = requests.post(webhook, data=json.dumps(msg_body), headers=headers, timeout=30) + res = res.json() + print(f"Feishu webhook response: {res}") + except Exception as err: # pylint: disable=W0703 + print(f"HTTP Post error: {err}") + res = None + + return res diff --git a/internlm/monitor/monitor.py b/internlm/monitor/monitor.py new file mode 100644 index 0000000..ca5cf55 --- /dev/null +++ b/internlm/monitor/monitor.py @@ -0,0 +1,226 @@ +import os +import signal +import socket +import time +from contextlib import contextmanager +from threading import Thread + +from internlm.core.context import global_context as gpc +from internlm.monitor.alert import send_feishu_msg_with_webhook +from internlm.utils.common import SingletonMeta + +from .utils import get_job_key, set_env_var + + +def send_alert_message(address: str = None, title: str = None, message: str = None): + """ + Send alert messages to the given Feishu webhook address in log rank. + + Args: + address (str): The alert address to be used to send message, defaults to None. + title (str): The message title, defaults to None. + message (str): The message body, defaults to None. + """ + + if address is not None and gpc.is_rank_for_log(): + send_feishu_msg_with_webhook( + webhook=address, + title=title if title else get_job_key(), + message=message, + ) + + +class MonitorTracker(Thread): + """ + Track job status and alert to Feishu during job training. + + Args: + alert_address (str): The Feishu webhook address for sending alerting messages. + check_interval (float): The interval in seconds for monitoring checks. Defaults to 300. + loss_spike_limit (float): The threshold for detecting loss value spikes. Defaults to 1.5. + """ + + def __init__( + self, + alert_address: str, + check_interval: float = 300, + loss_spike_limit: float = 1.5, + ): + super().__init__() + self.alert_address = alert_address + self.check_interval = check_interval + self.loss_spike_limit = loss_spike_limit + self.last_active_time = -1 + self.last_loss_value = -1 + self.stopped = False + self.start() + + def run(self): + """ + start the monitor tracker. + """ + + while not self.stopped: + try: + self._check_stuck() + self._check_loss_spike() + except Exception: + continue + time.sleep(self.check_interval) + + def _check_stuck(self): + """ + Check training status for potential stuck condition. + """ + + new_active_time = -1 + if os.getenv("LAST_ACTIVE_TIMESTAMP") is not None: + new_active_time = os.getenv("LAST_ACTIVE_TIMESTAMP") + if int(new_active_time) <= int(self.last_active_time) and new_active_time != -1: + self._send_alert("Training may be in stuck status, please check it.") + self.last_active_time = new_active_time + + def _check_loss_spike(self): + """ + Check for loss value spikes. 
+        """
+
+        if gpc.is_rank_for_log():
+            new_loss_value = -1
+            new_step_id = -1
+            if os.getenv("LOSS") is not None:
+                new_loss_value = os.getenv("LOSS")
+            if os.getenv("STEP_ID") is not None:
+                new_step_id = os.getenv("STEP_ID")
+
+            if (float(new_loss_value) / float(self.last_loss_value)) > self.loss_spike_limit and new_loss_value != -1:
+                assert int(new_step_id) >= 0
+                self._send_alert(
+                    f"Checking periodically: a loss spike may have happened in step {new_step_id}, "
+                    f"loss value from {self.last_loss_value} to {new_loss_value}, please check it."
+                )
+
+            self.last_loss_value = new_loss_value
+
+    def _send_alert(self, message):
+        """
+        Send an alerting message to the Feishu webhook address.
+
+        Args:
+            message (str): The alerting message to be sent.
+        """
+
+        send_alert_message(
+            address=self.alert_address,
+            message=message,
+        )
+
+    def stop(self):
+        """
+        Stop the monitor tracker.
+        """
+
+        self.stopped = True
+
+
+class MonitorManager(metaclass=SingletonMeta):
+    """
+    Monitor manager for managing the monitor thread and monitoring training status.
+    """
+
+    def __init__(self, loss_spike_limit: float = 1.5) -> None:
+        self.monitor_thread = None
+        self.loss_spike_limit = loss_spike_limit
+        self.last_step_loss = -1
+
+    def monitor_loss_spike(self, alert_address: str = None, step_count: int = 0, cur_step_loss: float = 0.0):
+        """Check the loss value; if a loss spike occurs, send an alert message to Feishu."""
+        set_env_var(key="LOSS", value=cur_step_loss)
+        set_env_var(key="STEP_ID", value=step_count)
+
+        if self.last_step_loss != -1 and cur_step_loss > self.loss_spike_limit * self.last_step_loss:
+            send_alert_message(
+                address=alert_address,
+                message=(
+                    f"Checking step by step: a loss spike may have happened in step {step_count}, "
+                    f"loss value from {self.last_step_loss} to {cur_step_loss}, please check it."
+                ),
+            )
+        self.last_step_loss = cur_step_loss
+
+    def monitor_exception(self, alert_address: str = None, excp_info: str = None):
+        """Catch and format exception information, then send an alert message to Feishu."""
+        filtered_trace = excp_info.split("\n")[-10:]
+        format_trace = ""
+        for line in filtered_trace:
+            format_trace += "\n" + line
+        send_alert_message(
+            address=alert_address,
+            message=f"Catch Exception from {socket.gethostname()} with rank id {gpc.get_global_rank()}:{format_trace}",
+        )
+
+    def handle_sigterm(self, alert_address: str = None):
+        """Catch the SIGTERM signal and send an alert message to Feishu."""
+
+        def sigterm_handler(sys_signal, frame):
+            print("receive frame: ", frame)
+            print("receive signal: ", sys_signal)
+            send_alert_message(
+                address=alert_address,
+                message=f"Process received signal {sys_signal} and exited.",
+            )
+
+        signal.signal(signal.SIGTERM, sigterm_handler)
+
+    def start_monitor(
+        self,
+        job_name: str,
+        alert_address: str,
+        monitor_interval_seconds: int = 300,
+        loss_spike_limit: float = 1.5,
+    ):
+        """
+        Initialize and start the monitor thread for checking training job status, loss spikes and so on.
+
+        Args:
+            job_name (str): The training job name.
+            alert_address (str): The Feishu webhook address for sending alert messages.
+            monitor_interval_seconds (int): The monitor interval in seconds, defaults to 300.
+            loss_spike_limit (float): The ratio of the current loss to the previous loss above which a loss spike
+                is reported, defaults to 1.5.
+ """ + + # initialize some variables for monitoring + set_env_var(key="JOB_NAME", value=job_name) + + # start a monitor thread, periodically check the training status + self.monitor_thread = MonitorTracker( + alert_address=alert_address, + check_interval=monitor_interval_seconds, + loss_spike_limit=loss_spike_limit, + ) + + def stop_monitor(self): + """Stop the monitor and alert thread.""" + if self.monitor_thread is not None: + self.monitor_thread.stop() + + +monitor_manager = MonitorManager() + + +@contextmanager +def initialize_monitor_manager(job_name: str = None, alert_address: str = None): + if alert_address is not None: + try: + monitor_manager.start_monitor(job_name=job_name, alert_address=alert_address) + monitor_manager.handle_sigterm(alert_address=alert_address) + send_alert_message(address=alert_address, message=f"Training in {socket.gethostname()} is starting.") + yield + finally: + send_alert_message( + address=gpc.config.alert_address, message=f"Training in {socket.gethostname()} completed." + ) + monitor_manager.stop_monitor() + else: + yield diff --git a/internlm/monitor/utils.py b/internlm/monitor/utils.py new file mode 100644 index 0000000..f64c7dc --- /dev/null +++ b/internlm/monitor/utils.py @@ -0,0 +1,32 @@ +import os +from datetime import datetime + + +def now_time(): + return datetime.now().strftime("%b%d_%H-%M-%S") + + +def set_env_var(key, value): + os.environ[str(key)] = str(value) + + +def get_job_id(): + job_id = "none" + if os.getenv("SLURM_JOB_ID") is not None: + job_id = os.getenv("SLURM_JOB_ID") + elif os.getenv("K8S_WORKSPACE_ID") is not None: + job_id = os.getenv("K8S_WORKSPACE_ID") + + return job_id + + +def get_job_name(): + job_name = f"unknown-{now_time()}" + if os.getenv("JOB_NAME") is not None: + job_name = os.getenv("JOB_NAME") + + return job_name + + +def get_job_key(): + return f"{get_job_id()}_{get_job_name()}" diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 3ee2270..8bdeccf 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -3,15 +3,15 @@ import math from functools import partial +from itertools import product -import amp_C import torch import torch.distributed as dist -from apex.multi_tensor_apply import multi_tensor_applier from torch.optim import Optimizer from internlm.core.context import Config, ParallelMode from internlm.core.context import global_context as gpc +from internlm.monitor import send_alert_message from internlm.solver.optimizer.store import ( BucketStore, GradientStore, @@ -20,6 +20,7 @@ from internlm.solver.optimizer.store import ( ) from internlm.solver.optimizer.utils import ( DynamicGradScaler, + ParamBcastSyncHandler, flatten, get_grad_accumulate_object, has_inf_or_nan, @@ -28,33 +29,16 @@ from internlm.solver.optimizer.utils import ( split_half_float_double, sync_param, ) -from internlm.utils.common import get_current_device, get_tensor_norm, move_norm_to_cuda +from internlm.utils.common import get_current_device from internlm.utils.logger import get_logger from internlm.utils.megatron_timers import megatron_timer as timer -from internlm.utils.parallel import is_model_parallel_parameter + +from .utils import compute_norm inf = math.inf logger = get_logger(__file__) -def calc_l2_norm(grads): - norm = 0.0 - if len(grads) > 0: - dummy_overflow_buf = torch.cuda.IntTensor([0]) - norm, _ = multi_tensor_applier( - amp_C.multi_tensor_l2norm, dummy_overflow_buf, [grads], False # no per-parameter 
norm - ) - return norm - - -def calc_lp(grads, norm_type): - norm = 0.0 - for grad in grads: - grad_norm = torch.norm(grad, norm_type) - norm += grad_norm**norm_type - return norm - - class BaseOptimizer(Optimizer): """ Base Optimizer. @@ -105,12 +89,15 @@ class HybridZeroOptimizer(BaseOptimizer): self, optimizer: Optimizer, cpu_offload=False, - overlap_broadcast=False, grad_scal_cfg: Config = None, zero_cfg: Config = None, + param_bcast_sync_handler: ParamBcastSyncHandler = None, ): # DynamicGradScaler related args - initial_scale = grad_scal_cfg.fp16.initial_scale + if gpc.config.model.dtype is torch.float32: + initial_scale = 1 + else: + initial_scale = grad_scal_cfg.fp16.initial_scale min_scale = grad_scal_cfg.fp16.min_scale growth_interval = grad_scal_cfg.fp16.growth_interval growth_factor = grad_scal_cfg.growth_factor @@ -119,9 +106,10 @@ class HybridZeroOptimizer(BaseOptimizer): max_scale = grad_scal_cfg.max_scale # Zero related args - overlap_communication = zero_cfg.zero_overlap_communication reduce_bucket_size = zero_cfg.reduce_bucket_size clip_grad_norm = zero_cfg.clip_grad_norm + self._overlap_sync_grad = zero_cfg.overlap_sync_grad + self._overlap_sync_param = zero_cfg.overlap_sync_param super().__init__(optim=optimizer) @@ -142,7 +130,7 @@ class HybridZeroOptimizer(BaseOptimizer): self._fp32_flat_param_groups_of_current_rank = dict() # communication params - self._overlap_communication = overlap_communication + # self._overlap_communication = overlap_communication self._reduce_bucket_size = reduce_bucket_size # gradient scaler @@ -173,7 +161,12 @@ class HybridZeroOptimizer(BaseOptimizer): + f"zo-{self._zero_local_rank}.pt" ) self.params_per_rank_id_dict = [] - self.overlap_broadcast = overlap_broadcast + self._param_bcast_sync_handler = param_bcast_sync_handler + if self._overlap_sync_param: + assert self._param_bcast_sync_handler is not None + self._broadcast_comm_stream = torch.cuda.Stream() + else: + self._broadcast_comm_stream = torch.cuda.current_stream() # iterate over the param group in the optimizer # partition these param groups for data parallel training @@ -195,6 +188,7 @@ class HybridZeroOptimizer(BaseOptimizer): if len(params) != 0: self._param_store.add_fp16_param_list_by_rank_group(rank, group_id, params) for param in params: + setattr(param, "group_id", group_id) self._param_store.set_param_to_rank(param, rank) # move to cpu to make room to create the flat tensor @@ -240,14 +234,16 @@ class HybridZeroOptimizer(BaseOptimizer): # flag used to skip unnecessary gradient reduce operation when gradient accumulation is enabled. 
self.skip_grad_reduce = False - # intialize communication stream for - # communication-compuation overlapping - if self._overlap_communication: + # initialize communication stream for + # communication-computation overlapping + if self._overlap_sync_grad: self._comm_stream = torch.cuda.Stream() + else: + self._comm_stream = torch.cuda.current_stream() # reduction hook is only used if overlapping communication # if it is stage 1 without overlapping, no hook will be attached - if self._overlap_communication: + if self._overlap_sync_grad: self._attach_reduction_hook() @property @@ -281,8 +277,10 @@ class HybridZeroOptimizer(BaseOptimizer): global_id = str(i) for j in range(len(param.size())): global_id = "_".join([global_id, str(param.size()[j])]) - - rank_to_go = numel_per_rank.index(min(numel_per_rank)) + if self._overlap_sync_param: + rank_to_go = self._param_bcast_sync_handler.get_rank_by_param(param) + else: + rank_to_go = numel_per_rank.index(min(numel_per_rank)) params_per_rank[rank_to_go].append(param) self.params_per_rank_id_dict[-1][rank_to_go].append(global_id) numel_per_rank[rank_to_go] += param.numel() @@ -313,7 +311,9 @@ class HybridZeroOptimizer(BaseOptimizer): self._grad_store.add_accumulate_grad_object(accum_grad_obj) reduction_func = partial( - self._store_and_try_reduce_grads_by_bucket, param=param, reduce_rank=reduce_rank + self._store_and_try_reduce_grads_by_bucket, + param=param, + reduce_rank=reduce_rank, ) # define hook @@ -334,7 +334,7 @@ class HybridZeroOptimizer(BaseOptimizer): # if full, will reduce the grads already in the bucket # after reduction, the bucket will be empty if self._bucket_store.num_elements_in_bucket(reduce_rank) + param_size > self._reduce_bucket_size: - self._reduce_grads_stored_in_bucket(reduce_rank) + self._reduce_grads_stored_in_bucket(reduce_rank, last_bucket=False) # the param must not be reduced to ensure correctness is_param_reduced = self._param_store.is_param_reduced(param) @@ -352,7 +352,7 @@ class HybridZeroOptimizer(BaseOptimizer): self._bucket_store.add_grad(param.grad, reduce_rank) self._bucket_store.add_param(param, reduce_rank) - def _reduce_grads_stored_in_bucket(self, reduce_rank=None): + def _reduce_grads_stored_in_bucket(self, reduce_rank=None, last_bucket=False): # reduce grads self._reduce_grads_by_rank( reduce_rank=reduce_rank, @@ -360,30 +360,27 @@ class HybridZeroOptimizer(BaseOptimizer): bucket_size=self._bucket_store.num_elements_in_bucket(reduce_rank), ) - # use communication stream if overlapping - # communication with computation - if self._overlap_communication: - stream = self._comm_stream - else: - stream = torch.cuda.current_stream() + params_in_bucket = self._bucket_store.get_param(reduce_rank=reduce_rank) - with torch.cuda.stream(stream): - params_in_bucket = self._bucket_store.get_param(reduce_rank=reduce_rank) + for param in params_in_bucket: + # the is_param_reduced flag should be False showing that + # this param is not reduced before calling self._reduce_grads_by_rank + is_param_reduced = self._param_store.is_param_reduced(param) - for param in params_in_bucket: - # the is_param_reduced flag should be False showing that - # this param is not reduced before calling self._reduce_grads_by_rank - is_param_reduced = self._param_store.is_param_reduced(param) + if is_param_reduced: + msg = ( + f"Parameter of size ({param.size()}) has been reduced, " + + "duplicate reduction will lead to arithmetic incorrectness" + ) + raise RuntimeError(msg) - if is_param_reduced: - msg = ( - f"Parameter of size ({param.size()}) 
has been reduced, " - + "duplicate reduction will lead to arithmetic incorrectness" - ) - raise RuntimeError(msg) + # update the flag + self._param_store.set_param_reduction_state(param, True) - # update the flag - self._param_store.set_param_reduction_state(param, True) + if self._param_store.belongs_to_current_rank(param): + self._param_store.add_reduced_param_for_compute_norm(param, last_bucket) + else: + self._param_store.add_previous_reduced_param(param) self._bucket_store.reset_by_rank(reduce_rank) @@ -401,17 +398,17 @@ class HybridZeroOptimizer(BaseOptimizer): self._reduce_and_copy(bucket=param_bucket, reduce_rank=reduce_rank) def _reduce_and_copy(self, bucket: TensorBucket, reduce_rank): - if self._overlap_communication: - torch.cuda.synchronize() + if self._overlap_sync_grad: + self._comm_stream.synchronize() self._param_store.clear_grads_of_previous_reduced_params() - stream = self._comm_stream - else: - stream = torch.cuda.current_stream() - with torch.cuda.stream(stream): + with torch.cuda.stream(self._comm_stream): flat = bucket.flatten() reduced_flat = reduce_tensor( - tensor=flat, dtype=self.dtype, dst_rank=reduce_rank, parallel_mode=ParallelMode.DATA + tensor=flat, + dtype=self.dtype, + dst_rank=reduce_rank, + parallel_mode=ParallelMode.DATA, ) # update the reduced tensor @@ -438,6 +435,7 @@ class HybridZeroOptimizer(BaseOptimizer): reduction_states = self._param_store.get_param_reduction_states() for tensor, _ in reduction_states.items(): reduction_states[tensor] = False + self._param_store.reset_reduced_data_for_compute_norm() # accumulate gradient avg_gradients = self._grad_store._averaged_gradients @@ -486,6 +484,30 @@ class HybridZeroOptimizer(BaseOptimizer): # Gradients may not be fully synchronized here. + def _compute_norm_with_stage( + self, + group_id: int = 0, + last_bucket: bool = False, + last_stage: bool = False, + previous_norm=None, + ): + # compute norm for gradients that have been reduced + params, grads = self._param_store.get_reduced_param_for_compute_norm(group_id=group_id, last_bucket=last_bucket) + if len(params) == 0: + grads = [self.padding_grad] + params = [self.padding_tensor] + + if self._clip_grad_norm > 0: + # this norm is before scaling, it will be very large + norm = compute_norm( + gradients=grads, + parameters=params, + last_stage=last_stage, + previous_norm=previous_norm, + ) + + return norm + def step(self, closure=None): """Performs a single optimization step. 
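The _compute_norm_with_stage helper added above is called twice per step by the reworked step() in the next hunk: once over the buckets already reduced during backward, and once with last_bucket=True and last_stage=True, passing the earlier result as previous_norm, so the cross-rank reduction only happens at the final stage. A single-process sketch of that accumulation scheme (stage_norm_sq is an illustrative helper; the real work is done by compute_norm in internlm/solver/optimizer/utils.py):

import torch

def stage_norm_sq(grads, previous_norm=None):
    # sum of squared L2 norms for one stage; previous_norm carries the earlier stage's value
    total = torch.zeros(1)
    for g in grads:
        total += torch.norm(g.float(), 2) ** 2
    return total if previous_norm is None else total + previous_norm

former = stage_norm_sq([torch.randn(8), torch.randn(16)])         # buckets reduced during backward
total_sq = stage_norm_sq([torch.randn(4)], previous_norm=former)  # last bucket, last stage
# in the distributed case an all-reduce(SUM) runs at this point, then clipping uses total_sq ** 0.5
print(total_sq.item() ** 0.5)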
@@ -497,88 +519,92 @@ class HybridZeroOptimizer(BaseOptimizer): """ assert closure is None, "closure is not supported by step()" - timer("sync_grad").start() # if not overlapping communication (no reduction hook is attached) # we need to manually reduce these gradients - if not self._overlap_communication: + if not self._overlap_sync_grad: for group_id in range(len(self._fp16_param_groups)): for param in self._fp16_param_groups[group_id]: if param.grad is not None: self._store_and_try_reduce_grads_by_bucket(param) # we need to reduce the gradients left in the communication bucket - self._reduce_grads_stored_in_bucket() + self._reduce_grads_stored_in_bucket(reduce_rank=None, last_bucket=True) + + # compute norm for gradients in the before bucket + groups_norms = [] + for group_id in range(self.num_param_groups): + groups_norms.append(self._compute_norm_with_stage(group_id=group_id)) # clear reduced grads - if self._overlap_communication: - torch.cuda.synchronize() + if self._overlap_sync_grad: + # grads in the last bucket is reduced + self._comm_stream.synchronize() self._param_store.clear_grads_of_previous_reduced_params() + # compute norm for gradients in the last bucket + total_norms = [] + for group_id in range(self.num_param_groups): + total_norms.append( + self._compute_norm_with_stage( + group_id=group_id, + last_bucket=True, + last_stage=True, + previous_norm=groups_norms[group_id], + ) + ) + + timer("sync_grad").start() self._sync_grad() timer("sync_grad").stop() - return self._step(closure=closure) + return self._step(closure=closure, norms=total_norms) - def _step(self, closure=None): + def _step(self, closure=None, norms=None): assert closure is None, "closure is not supported by step()" # check for overflow - found_inf = self._check_overflow() + found_inf = False + # if there is INF values in grades, compute_norm func would also returns -1 + # thus, we try to avoid call _check_overflow here + # found_inf = self._check_overflow() # Because you may encounter inf when computing norm - timer("cal_norm").start() - norm_groups = [] - for group_id in range(self.num_param_groups): - # compute norm - if self._zero_local_rank not in self.param_group_no_params_ranks[group_id]: - gradients = self._grad_store.get_averaged_gradients_by_group(group_id) - parameters = self._param_store.get_fp16_params_by_rank_group( - group_id=group_id, rank=self._zero_local_rank - ) - else: - # in order to prevent collection communication from hanging, - # we need to involve rank that are not assigned parameters in compute_norm(), - # so we give them a fp16 vector of 0 values. 
- gradients = [self.padding_grad] - parameters = [self.padding_tensor] - if self._clip_grad_norm > 0: - # this norm is before scaling, it will be very large - norm_group = compute_norm( - gradients=gradients, - parameters=parameters, - ) - if norm_group == -1: - timer("cal_norm").stop() - found_inf = True - break - norm_groups.append(norm_group) + if -1 in norms: + found_inf = True loss_scale = float(self.loss_scale.item()) # backup - self.grad_scaler.update(found_inf) + if gpc.config.model.dtype is not torch.float32: + self.grad_scaler.update(found_inf) # update loss scale if overflow occurs if found_inf: if gpc.is_rank_for_log(): logger.warning("Overflow occurs, please check it.") + send_alert_message( + address=gpc.config.alert_address, + message="Overflow occurs, please check it.", + ) self._grad_store._averaged_gradients = dict() self.zero_grad() - return False, None + return False, norms # copy the grad of fp16 param to fp32 param single_grad_partition_groups = [] - global_norm = 0 for group_id in range(self.num_param_groups): # compute norm # The following operations are performed only on the rank to which parameters are assigned. if not self.param_group_has_params[group_id]: continue - gradients = self._grad_store.get_averaged_gradients_by_group(group_id) # create flat gradient for the flat fp32 params - fp16_avg_grads = gradients - flat_fp16_avg_grads = flatten(fp16_avg_grads) + gradients = self._grad_store.get_averaged_gradients_by_group(group_id) + with torch.no_grad(): + flat_fp16_avg_grads = flatten(gradients) + self._grad_store.reset_average_gradients_by_group(group_id) + gradients = None # release cuda memory dtype = self._fp32_flat_param_groups_of_current_rank[group_id].dtype flat_fp32_avg_grads = flat_fp16_avg_grads.to(dtype) + flat_fp16_avg_grads = None # release cuda memory param_shape = self._fp32_flat_param_groups_of_current_rank[group_id].shape assert ( @@ -588,19 +614,19 @@ class HybridZeroOptimizer(BaseOptimizer): single_grad_partition_groups.append(flat_fp32_avg_grads) device = self._fp32_flat_param_groups_of_current_rank[group_id].device self._fp32_flat_param_groups_of_current_rank[group_id].grad = flat_fp32_avg_grads.to(device) - self._grad_store._averaged_gradients[group_id] = [] - self._grad_store._averaged_gradients[group_id] = [] # unscale and clip grads # get the global norm + global_norm_groups = [] if self._clip_grad_norm > 0: - global_norm = sum(norm_groups) ** 0.5 + for norm in norms: + global_norm_groups.append(norm**0.5) # the following operations are performed only on the rank to which parameters are assigned. 
- if len(single_grad_partition_groups) != 0: - self._unscale_and_clip_grads(single_grad_partition_groups, global_norm, loss_scale) + if gpc.config.model.dtype is not torch.float32: + if len(single_grad_partition_groups) != 0: + self._unscale_and_clip_grads(single_grad_partition_groups, global_norm_groups, loss_scale) - timer("cal_norm").stop() # update the parameters timer("step").start() @@ -619,35 +645,40 @@ class HybridZeroOptimizer(BaseOptimizer): fp32_param = self._fp32_flat_param_groups_of_current_rank[group_id] fp16_param.data.copy_(fp32_param) - # TODO: support broadcast overlap - self.broadcast_params(overlap=False) + with torch.cuda.stream(self._broadcast_comm_stream): + self.broadcast_params() timer("step").stop() + # update gradients may not be needed here, because the sync_params function is used in initialization, # so synchronization is maintained - return True, global_norm / loss_scale + return True, [global_norm / loss_scale for global_norm in global_norm_groups] - def broadcast_params(self, overlap=False): + def broadcast_params(self): handles = [] - for group_id in range(self.num_param_groups): - for rank in range(self._zero_world_size): - # The following operations are performed only on the rank to which parameters are assigned. - if rank not in self.param_group_no_params_ranks[group_id]: - fp16_param = self._param_store.get_flat_fp16_param_by_rank_group(rank=rank, group_id=group_id) - # grank = gpc.get_ranks_in_group(group_type)[rank] # need to convert to the global rank - # assert grank == rank, f"{grank} == {rank}" - g_rank = gpc.get_ranks_in_group(self._broadcast_parallel_mode)[rank] - handle = dist.broadcast( - fp16_param, src=g_rank, group=gpc.get_group(ParallelMode.ZERO1), async_op=True - ) - handles.append(handle) + for rank, group_id in product(range(self._zero_world_size), range(self.num_param_groups)): + # The following operations are performed only on the rank to which parameters are assigned. 
+ if rank in self.param_group_no_params_ranks[group_id]: + continue + fp16_param = self._param_store.get_flat_fp16_param_by_rank_group(rank=rank, group_id=group_id) + # grank = gpc.get_ranks_in_group(group_type)[rank] # need to convert to the global rank + # assert grank == rank, f"{grank} == {rank}" + g_rank = gpc.get_ranks_in_group(self._broadcast_parallel_mode)[rank] + handle = dist.broadcast( + fp16_param, + src=g_rank, + group=gpc.get_group(ParallelMode.ZERO1), + async_op=True, + ) - if not overlap: - for handle in handles: - handle.wait() - else: - return handles + if self._overlap_sync_param: + self._param_bcast_sync_handler.add_bcast_handle(rank, handle) + else: + handles.append(handle) + + for handle in handles: + handle.wait() ################## # FP16 Utilities # @@ -665,22 +696,28 @@ class HybridZeroOptimizer(BaseOptimizer): if avg_grad is not None and has_inf_or_nan(avg_grad): self._found_overflow.fill_(1.0) break - dist.all_reduce(self._found_overflow, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.GLOBAL)) + dist.all_reduce( + self._found_overflow, + op=dist.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.GLOBAL), + ) return self._found_overflow.item() > 0 - def _unscale_and_clip_grads(self, grad_groups_flat, total_norm, loss_scale): + def _unscale_and_clip_grads(self, grad_groups_flat, total_norm_groups, loss_scale): # compute combined scale factor for this group - combined_scale = loss_scale + combined_scale_groups = [] if self._clip_grad_norm > 0.0: # norm is in fact norm*scale - clip = ((total_norm / loss_scale) + 1e-6) / self._clip_grad_norm - if clip > 1.0: - combined_scale = clip * loss_scale + for group_id, total_norm in enumerate(total_norm_groups): + combined_scale_groups.append(loss_scale) + clip = ((total_norm / loss_scale) + 1e-6) / self._clip_grad_norm + if clip > 1.0: + combined_scale_groups[group_id] = clip * loss_scale - for grad in grad_groups_flat: - grad.data.mul_(1.0 / combined_scale) + for group_id, grad in enumerate(grad_groups_flat): + grad.data.mul_(1.0 / combined_scale_groups[group_id]) def clip_grad_norm(self, model, max_norm): # will conduct in the step() @@ -733,87 +770,3 @@ class HybridZeroOptimizer(BaseOptimizer): if "zero_devide_optim_plan" in states: self.params_per_rank_id_dict = states["zero_devide_optim_plan"] - - -def compute_norm(gradients, parameters, norm_type=2): - """Get the norm - Arguments: - gradients (Iterable[Tensor]): The gradient value. - parameters (Iterable[Tensor]): The parameter each gradient corresponds to. - norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for - infinity norm. - - Returns: - Total norm of the parameters, need total_norm**(1/norm) before using. - """ - - enable_cuda_kernels = gradients[0].device.type == "cuda" - # Norm parameters. - norm_type = float(norm_type) - - # Calculate norm. - if norm_type == inf: - total_norm = max(g.data.abs().max() for g in gradients) - total_norm_cuda = torch.FloatTensor([float(total_norm)], device=gradients[0].device) - # Take max across all model-parallel GPUs. 
- if gpc.get_world_size(ParallelMode.MODEL) > 1: - dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=gpc.get_group(ParallelMode.MODEL)) - total_norm = total_norm_cuda[0].item() - else: - tensor_parallel_grads = [] - for g, p in zip(gradients, parameters): - # TODO: consider the pipeline shared parameter - if ( - gpc.is_initialized(ParallelMode.PIPELINE) - and hasattr(p, "pipeline_shared_module_pg") - and dist.get_rank(p.pipeline_shared_module_pg) == 0 - ): # if shared between different pipe, only count o - tensor_parallel_grads.append(g.data.float()) - elif ( - gpc.is_initialized(ParallelMode.PIPELINE) - and hasattr(p, "pipeline_shared_module_pg") - and dist.get_rank(p.pipeline_shared_module_pg) != 0 - ): - continue - elif ( - gpc.is_initialized(ParallelMode.TENSOR) - and not is_model_parallel_parameter(p) - and gpc.get_local_rank(ParallelMode.TENSOR) == 0 - ): # if not used in each chunk, such as layernorm - tensor_parallel_grads.append(g.data.float()) - elif is_model_parallel_parameter(p): - tensor_parallel_grads.append(g.data.float()) - elif gpc.get_local_rank(ParallelMode.TENSOR) != 0: - continue - else: - raise RuntimeError("Should not arrive here") - - if norm_type == 2.0 and enable_cuda_kernels: - tensor_parallel_norm = calc_l2_norm(tensor_parallel_grads) ** norm_type - else: - tensor_parallel_norm = calc_lp(tensor_parallel_grads, norm_type) - - # If norm is type of float, then we convert them into torch.Tensor. - tensor_parallel_norm = get_tensor_norm(tensor_parallel_norm, enable_cuda_kernels) - # If grads are on CPU, the norms is also on CPU. Cast them to CUDA tensors - if not enable_cuda_kernels: - tensor_parallel_norm = move_norm_to_cuda(tensor_parallel_norm) - - total_norm = tensor_parallel_norm - - # Sum across all model-parallel GPUs. - if gpc.is_initialized(ParallelMode.MODEL): - dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.MODEL)) - - # This is because we use zero1, so we need to use this reduction. - # TODO: Check zero group to be a subset of dp group. - dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.ZERO1)) - - if torch.is_tensor(total_norm): - total_norm = total_norm.item() - - # Scale. - if total_norm == float("inf") or total_norm == -float("inf"): - total_norm = -1 - - return total_norm diff --git a/internlm/solver/optimizer/store.py b/internlm/solver/optimizer/store.py index 2ef2e4f..05a44d2 100644 --- a/internlm/solver/optimizer/store.py +++ b/internlm/solver/optimizer/store.py @@ -152,6 +152,11 @@ class ParameterStore(BaseStore): self._is_param_reduced = dict() self._reduced_param = [] + self._former_bucket_reduced_param = {} + self._last_bucket_reduced_param = {} + self._former_bucket_reduced_grad = {} + self._last_bucket_reduced_grad = {} + def set_param_to_rank(self, tensor: Tensor, rank: int) -> None: """ Set the mapping between parameter to rank, each parameter should be owned by a rank. 
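The ParameterStore gains two pairs of registries just above (former-bucket vs. last-bucket params and grads), and the next hunk adds the accessors that fill and drain them per parameter group, keyed by the group_id attribute the optimizer attaches to every parameter. A toy sketch of that bookkeeping (the names here are stand-ins, not the store's API):

import torch
from collections import defaultdict

former_bucket, last_bucket = defaultdict(list), defaultdict(list)

def add_reduced_param(param, last=False):
    registry = last_bucket if last else former_bucket
    registry[param.group_id].append((param, param.grad))

p = torch.nn.Parameter(torch.randn(2, 2))
p.grad = torch.zeros_like(p)
p.group_id = 0                     # the optimizer tags each param with its group id
add_reduced_param(p, last=False)
print(len(former_bucket[0]), len(last_bucket[0]))  # -> 1 0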
@@ -223,6 +228,39 @@ class ParameterStore(BaseStore): def add_previous_reduced_param(self, tensor): self._reduced_param.append(tensor) + def add_reduced_param_for_compute_norm(self, param, last_bucket=False): + group_id = getattr(param, "group_id") + if last_bucket: + if group_id not in self._last_bucket_reduced_param: + self._last_bucket_reduced_param[group_id] = [] + self._last_bucket_reduced_grad[group_id] = [] + + self._last_bucket_reduced_param[group_id].append(param) + self._last_bucket_reduced_grad[group_id].append(param.grad) + else: + if group_id not in self._former_bucket_reduced_param: + self._former_bucket_reduced_param[group_id] = [] + self._former_bucket_reduced_grad[group_id] = [] + + self._former_bucket_reduced_param[group_id].append(param) + self._former_bucket_reduced_grad[group_id].append(param.grad) + + def get_reduced_param_for_compute_norm(self, group_id=0, last_bucket=False): + if not last_bucket: + if group_id not in self._former_bucket_reduced_param: + return [], [] + return self._former_bucket_reduced_param[group_id], self._former_bucket_reduced_grad[group_id] + else: + if group_id not in self._last_bucket_reduced_param: + return [], [] + return self._last_bucket_reduced_param[group_id], self._last_bucket_reduced_grad[group_id] + + def reset_reduced_data_for_compute_norm(self): + self._former_bucket_reduced_param = {} + self._last_bucket_reduced_param = {} + self._former_bucket_reduced_grad = {} + self._last_bucket_reduced_grad = {} + def clear_grads_of_previous_reduced_params(self): if len(self._reduced_param) > 0: for param in self._reduced_param: diff --git a/internlm/solver/optimizer/utils.py b/internlm/solver/optimizer/utils.py index f28cb8f..38e4560 100644 --- a/internlm/solver/optimizer/utils.py +++ b/internlm/solver/optimizer/utils.py @@ -1,20 +1,37 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- +import math from abc import ABC, abstractmethod -from typing import Dict, Optional +from collections import OrderedDict +from functools import partial +from typing import Any, Dict, Optional, Union import torch import torch.distributed as dist -from torch import Tensor +from torch import Tensor, nn from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc +from internlm.core.naive_amp import NaiveAMPModel +from internlm.utils.common import get_tensor_norm, move_norm_to_cuda from internlm.utils.logger import get_logger +from internlm.utils.parallel import is_model_parallel_parameter logger = get_logger(__file__) +try: + import amp_C + from apex.multi_tensor_apply import multi_tensor_applier + + APEX_AVAILABLE = True +except (ModuleNotFoundError, ImportError): + logger.warning("The torch implementation for cal_l2norm is slower than apex. 
Please note this!") + APEX_AVAILABLE = False + +inf = math.inf + def flatten(input_): return _flatten_dense_tensors(input_) @@ -46,12 +63,19 @@ def get_grad_accumulate_object(tensor): def split_half_float_double(tensor_list): - dtypes = ["torch.cuda.HalfTensor", "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor", "torch.cuda.BFloat16Tensor"] - buckets = [] - for _, dtype in enumerate(dtypes): - bucket = [t for t in tensor_list if t.type() == dtype] - if bucket: - buckets.append(bucket) + dtype_buckets = { + "torch.cuda.HalfTensor": [], + "torch.cuda.FloatTensor": [], + "torch.cuda.DoubleTensor": [], + "torch.cuda.BFloat16Tensor": [], + } + + for t in tensor_list: + dtype = t.type() + if dtype in dtype_buckets: + dtype_buckets[dtype].append(t) + + buckets = [bucket for bucket in dtype_buckets.values() if bucket] return buckets @@ -150,6 +174,149 @@ def sync_param(flat_tensor, tensor_list): p.data = q.data +def multi_tensor_l2norm_torch(tensor_list, per_tensor): + # Convert tensor_list elements to torch.float32 + tensor_list = [tensor.float() for tensor in tensor_list] + norms_tensor = torch.stack([torch.norm(tensor, p=2) for tensor in tensor_list]) + l2_norm = torch.norm(norms_tensor, p=2).unsqueeze(0) + + if per_tensor: + per_tensor_norm = norms_tensor + else: + per_tensor_norm = torch.Tensor([]).to(norms_tensor.device) + + return l2_norm, per_tensor_norm + + +def calc_l2_norm(grads): + norm = 0.0 + if len(grads) > 0: + if APEX_AVAILABLE: + dummy_overflow_buf = torch.cuda.IntTensor([0]) + norm, _ = multi_tensor_applier( + amp_C.multi_tensor_l2norm, + dummy_overflow_buf, + [grads], + False, # no per-parameter norm + ) + else: + norm, _ = multi_tensor_l2norm_torch(grads, False) + return norm + + +def calc_lp(grads, norm_type): + norm = 0.0 + for grad in grads: + grad_norm = torch.norm(grad, norm_type) + norm += grad_norm**norm_type + return norm + + +def compute_norm(gradients, parameters, last_stage=False, previous_norm=None, norm_type=2): + """Get the norm + Arguments: + gradients (Iterable[Tensor]): The gradient value. + parameters (Iterable[Tensor]): The parameter each gradient corresponds to. + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters, need total_norm**(1/norm) before using. + """ + + enable_cuda_kernels = gradients[0].device.type == "cuda" + # Norm parameters. + norm_type = float(norm_type) + + # Calculate norm. + if norm_type == inf: + total_norm = max(g.data.abs().max() for g in gradients) + total_norm_cuda = torch.FloatTensor([float(total_norm)], device=gradients[0].device) + + if last_stage is False: + return total_norm_cuda + + if previous_norm is not None: + total_norm_cuda = max(total_norm_cuda, previous_norm) + + # Take max across all model-parallel GPUs. 
+ if gpc.get_world_size(ParallelMode.MODEL) > 1: + dist.all_reduce( + total_norm_cuda, + op=dist.ReduceOp.MAX, + group=gpc.get_group(ParallelMode.MODEL), + ) + total_norm = total_norm_cuda[0].item() + else: + tensor_parallel_grads = [] + for g, p in zip(gradients, parameters): + # TODO: consider the pipeline shared parameter + if ( + gpc.is_initialized(ParallelMode.PIPELINE) + and hasattr(p, "pipeline_shared_module_pg") + and dist.get_rank(p.pipeline_shared_module_pg) == 0 + ): # if shared between different pipe stages, only count it once + tensor_parallel_grads.append(g.data.float()) + elif ( + gpc.is_initialized(ParallelMode.PIPELINE) + and hasattr(p, "pipeline_shared_module_pg") + and dist.get_rank(p.pipeline_shared_module_pg) != 0 + ): + continue + elif ( + gpc.is_initialized(ParallelMode.TENSOR) + and not is_model_parallel_parameter(p) + and gpc.get_local_rank(ParallelMode.TENSOR) == 0 + ): # if not used in each chunk, such as layernorm + tensor_parallel_grads.append(g.data.float()) + elif is_model_parallel_parameter(p): + tensor_parallel_grads.append(g.data.float()) + elif gpc.get_local_rank(ParallelMode.TENSOR) != 0: + continue + else: + raise RuntimeError("Should not arrive here") + + if norm_type == 2.0 and enable_cuda_kernels: + tensor_parallel_norm = calc_l2_norm(tensor_parallel_grads) ** norm_type + else: + tensor_parallel_norm = calc_lp(tensor_parallel_grads, norm_type) + + # If the norm is a float, convert it into a torch.Tensor. + tensor_parallel_norm = get_tensor_norm(tensor_parallel_norm, enable_cuda_kernels) + # If grads are on CPU, the norms are also on CPU. Cast them to CUDA tensors. + if not enable_cuda_kernels: + tensor_parallel_norm = move_norm_to_cuda(tensor_parallel_norm) + + total_norm = tensor_parallel_norm + + if last_stage is False: + return total_norm + + if previous_norm is not None: + total_norm = total_norm + previous_norm + + # Sum across all model-parallel GPUs. + if gpc.is_initialized(ParallelMode.MODEL): + dist.all_reduce( + total_norm, + op=dist.ReduceOp.SUM, + group=gpc.get_group(ParallelMode.MODEL), + ) + + # Because gradients are sharded across the ZERO1 group, the partial norms must be summed there as well. + # TODO: Check zero group to be a subset of dp group. + dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=gpc.get_group(ParallelMode.ZERO1)) + + if torch.is_tensor(total_norm): + total_norm = total_norm.item() + + # An overflowed norm is flagged as -1. + if total_norm == float("inf") or total_norm == -float("inf"): + total_norm = -1 + + return total_norm + + class BaseGradScaler(ABC): """A base class for the gradient scaler. 
@@ -313,3 +480,90 @@ class DynamicGradScaler(BaseGradScaler): self._scale = self._scale.fill_(state_dict["_scale"]) self._growth_step = state_dict["_growth_step"] self._hysteresis_step = state_dict["_hysteresis_step"] + + +class ParamBcastSyncHandler: + """ + Model partition handler for overlapping parameter broadcast with the forward pass. + """ + + def __init__(self, model: Union[nn.Module, nn.ModuleList]) -> None: + self._block_to_param = OrderedDict()  # block -> list of its parameters + self._param_to_rank = dict()  # param -> zero1 local rank that owns it + self._block_to_rank = dict()  # block -> zero1 local ranks whose broadcasts it must wait for + self._bcast_handles = dict()  # zero1 local rank -> pending broadcast handles + + zero1_size = gpc.get_world_size(ParallelMode.ZERO1) + total_param_num = sum(p.numel() for p in model.parameters()) + avg_param_num = total_param_num * 1.0 // zero1_size + + # wrap a single Module in a list so that ModuleList and Module share the same loop below + if not isinstance(model, nn.ModuleList): + model = [model] + + # record the parameters of each transformer/embedding/head/norm block + for _chunk in model: + if isinstance(_chunk, NaiveAMPModel): + _chunk = _chunk.model + + for _, children in _chunk.named_children(): + # should be the transformer block definition in modeling_xxx.py + if isinstance(children, nn.ModuleList): + # record the block that a parameter belongs to + for _, block in enumerate(children): + # self._block_to_param[f"{name}.{idx}"] = list(block.parameters()) + self._block_to_param[block] = list(block.parameters()) + else: + # record the block that a parameter belongs to + # self._block_to_param[name] = list(children.parameters()) + self._block_to_param[children] = list(children.parameters()) + + alloc_num = 0 + rank_to_go = 0 + + # process the parameters in block_to_param sequentially, + # allocate each parameter to a local rank of ParallelMode.ZERO1, + # NOTE that we do NOT consider following scenarios: + # 1) whether a parameter is trainable; + # 2) parameters may be in different optimizer groups + for block, params in self._block_to_param.items(): + # allocate a model block to a local rank of ParallelMode.ZERO1 + self._block_to_rank[block] = [rank_to_go] + for p in params: + alloc_num = alloc_num + p.numel() + # in this case, allocate the param to the next rank if possible + if alloc_num > avg_param_num * 1.01 and rank_to_go < zero1_size - 1: + rank_to_go = rank_to_go + 1 + alloc_num = 0 + self._block_to_rank[block].append(rank_to_go) + # allocate a parameter to a local rank of ParallelMode.ZERO1 + self._param_to_rank[p] = rank_to_go + + # initialize an empty list for _bcast_handles of each rank + for rank in range(gpc.get_world_size(ParallelMode.ZERO1)): + self._bcast_handles[rank] = [] + + # register_forward_pre_hook for transformer/embedding/norm/xxx block + self._register_sync_parameters_hook() + + def _register_sync_parameters_hook(self) -> None: + def _pre_forward_hook(model: nn.Module, inputs: Any): # pylint: disable=W0613 + bcast_handles = [] + # gather all required broadcast handles into a list + for rank in self._block_to_rank[model]: + bcast_handles.extend(self._bcast_handles[rank]) + # clear _bcast_handles, since the gathered handles are waited on below + self._bcast_handles[rank] = [] + # wait for all required broadcast handles to complete + for handle in bcast_handles: + handle.wait() + + # register_forward_pre_hook for transformer/embedding/norm/xxx block + for block, _ in self._block_to_rank.items(): + block.register_forward_pre_hook(partial(_pre_forward_hook)) + + def get_rank_by_param(self, param) -> int: + return self._param_to_rank[param] + + def add_bcast_handle(self, rank, handle) -> None: + self._bcast_handles[rank].append(handle) diff --git 
a/internlm/train/__init__.py b/internlm/train/__init__.py new file mode 100644 index 0000000..457d7a4 --- /dev/null +++ b/internlm/train/__init__.py @@ -0,0 +1,19 @@ +from .training_internlm import ( + get_train_data_loader, + get_validation_data_loader, + initialize_llm_profile, + initialize_model, + initialize_optimizer, + load_new_batch, + record_current_batch_training_metrics, +) + +__all__ = [ + "get_train_data_loader", + "get_validation_data_loader", + "initialize_llm_profile", + "initialize_model", + "initialize_optimizer", + "load_new_batch", + "record_current_batch_training_metrics", +] diff --git a/internlm/train/training_internlm.py b/internlm/train/training_internlm.py new file mode 100644 index 0000000..bab56f1 --- /dev/null +++ b/internlm/train/training_internlm.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import time +from functools import partial +from typing import Callable, Iterable, Union + +import torch +import torch.distributed as dist +from torch import nn +from torch.utils.data import ConcatDataset, DataLoader + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.core.naive_amp import NaiveAMPModel +from internlm.core.trainer import TrainState +from internlm.data.batch_sampler import StaticBatchSampler, get_dpsampler_dataloader +from internlm.data.collaters import jsonl_ds_collate_fn, packed_collate_fn +from internlm.data.dataset import get_dataset_dict +from internlm.data.dummy_dataset import RandomDataset +from internlm.data.packed_dataset import ( + PackedDataset, + PackedDatasetWithoutCuSeqlen, + get_packed_dataset_without_short_length, +) +from internlm.data.utils import DATASET_TYPE_IDS_MAP, unpack_data +from internlm.monitor import set_env_var +from internlm.monitor.monitor import monitor_manager as mm +from internlm.solver.beta2_scheduler import Beta2Scheduler +from internlm.solver.lr_scheduler import FineTuneCosineAnnealingWarmupLR +from internlm.solver.optimizer import HybridZeroOptimizer +from internlm.solver.optimizer.utils import ParamBcastSyncHandler +from internlm.utils.common import DummyProfile +from internlm.utils.logger import get_logger +from internlm.utils.megatron_timers import megatron_timer as timer +from internlm.utils.parallel import ( + is_no_pp_or_last_stage, + sync_model_param, + sync_model_param_within_tp, +) +from internlm.utils.registry import MODEL_INITIALIZER + +logger = get_logger(__file__) + + +def initialize_model(): + """ + Initialize model. + + Returns: The neural network model to be trained or evaluated. + """ + + model = MODEL_INITIALIZER.get_module(module_name=gpc.config.model_type)(**(gpc.config.model)) + if isinstance(model, nn.ModuleList): + model = nn.ModuleList( + [ + NaiveAMPModel( + model=_m, + output_to_fp32=False, # manually controlled by interleaved pipleline scheduler + dtype=gpc.config.model.get("dtype", torch.half), + sync_buffer=False, + ) + for _m in model + ] + ) + else: + model = NaiveAMPModel( + model=model, + output_to_fp32=is_no_pp_or_last_stage(), + dtype=gpc.config.model.get("dtype", torch.half), + sync_buffer=False, + ) + + # This sync is very important, cause the model weights kept in optimizer are copied + # from the origin parameters in the memory, so we should make sure the dp sync + # does not influence the model weights in optimizer be different with the origin parameters. 
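# Hedged sketch of the intent of sync_model_param (the real helper lives in internlm.utils.parallel):
# conceptually, every parameter is broadcast from one data-parallel rank so that all replicas, and
# hence the optimizer copies made from them, start from identical weights, e.g. something like:
#   for param in model.parameters():
#       # src_rank: the global rank chosen as the source of the DATA group (hypothetical name)
#       dist.broadcast(param, src=src_rank, group=gpc.get_group(ParallelMode.DATA))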
+ sync_model_param(model, parallel_mode=ParallelMode.DATA) + + # This function is needed to make sure parameters that are not splitted by tensor parallelism are + # the same across tensor parallelism. + sync_model_param_within_tp(model) + + return model + + +def initialize_optimizer(model: Union[nn.Module, nn.ModuleList]): + """ + Initialize optimizer. + + Args: + model (torch.nn.Module): Your model instance to be trained or evaluated. + + Returns: A tuple of (optimizer, beta2_scheduler, lr_scheduler). + """ + if gpc.config.hybrid_zero_optimizer.overlap_sync_param: + param_bcast_sync_handler = ParamBcastSyncHandler(model) + else: + param_bcast_sync_handler = None + + adam_cfg = gpc.config.adam + naive_optimizer = torch.optim.AdamW( + params=[{"params": model.parameters(), "weight_decay": adam_cfg.weight_decay}], + lr=adam_cfg.lr, + betas=(adam_cfg.adam_beta1, adam_cfg.adam_beta2), + eps=adam_cfg.adam_eps, + ) + + optimizer = HybridZeroOptimizer( + naive_optimizer, + grad_scal_cfg=gpc.config.grad_scaler, + zero_cfg=gpc.config.hybrid_zero_optimizer, + param_bcast_sync_handler=param_bcast_sync_handler, + ) + + beta2_scheduler = Beta2Scheduler(optimizer=naive_optimizer, **gpc.config.beta2_scheduler) + + lr_scheduler = FineTuneCosineAnnealingWarmupLR(optimizer, **gpc.config.lr_scheduler) + + return optimizer, beta2_scheduler, lr_scheduler + + +def get_train_data_loader( + num_worker: int = 0, dataset_generate_func: Callable = None, train_sampler=None, train_collate_fn=None +): + """ + Generate and return the training data loader. + + Returns: A tuple of (train_dl, dataset_types). + """ + + # Get the dataset types + dataset_types = None + dataset_types = list(DATASET_TYPE_IDS_MAP.keys()) + data_cfg = gpc.config.data + + # Get the sample weight dictionary + train_folder = data_cfg.train_folder + + if not train_folder: + train_ds = RandomDataset(num_samples=1000000, max_len=data_cfg.seq_len) + if data_cfg.pack_sample_into_one: + train_ds = PackedDatasetWithoutCuSeqlen( + train_ds, max_length_per_sample=data_cfg.seq_len, packed_length=data_cfg.packed_length + ) + else: + train_ds = PackedDataset( + train_ds, max_length_per_sample=data_cfg.seq_len, packed_length=data_cfg.packed_length + ) + else: + if dataset_generate_func is not None: + train_ds = dataset_generate_func() + else: + train_ds = get_packed_dataset_without_short_length( + folder=data_cfg.train_folder, + packed_length=data_cfg.packed_length, + max_length_per_sample=data_cfg.seq_len, + show_progress=dist.get_rank() == 0, + min_length=data_cfg.min_length, + min_length_dict=data_cfg.get("min_length_dict", {}), + pack_into_one_sample=data_cfg.pack_sample_into_one, + ) + + if dataset_generate_func is None or not train_folder: + # partition already completed + assert isinstance(train_ds, (PackedDataset, PackedDatasetWithoutCuSeqlen, ConcatDataset)) + # Create the training dataset sampler + train_sampler = StaticBatchSampler( + train_ds.datasets if isinstance(train_ds, ConcatDataset) else [train_ds], + batch_size=data_cfg.micro_num, + rampup_batch_size=data_cfg.rampup_batch_size, + micro_bsz=data_cfg.micro_bsz, + seed=1024, + drop_last=True, + data_rank=gpc.get_local_rank(ParallelMode.DATA), + data_world_size=gpc.get_world_size(ParallelMode.DATA), + ) + + if dataset_generate_func is None or not train_folder: + train_collate_fn = partial(packed_collate_fn, packed_length=data_cfg.packed_length) + + # Create the training data loader + train_dl = DataLoader( + dataset=train_ds, + batch_sampler=train_sampler, + num_workers=num_worker, + 
pin_memory=True, + collate_fn=train_collate_fn, + persistent_workers=num_worker > 0, + ) + + return train_dl, dataset_types + + +def get_validation_data_loader( + num_worker: int = 0, dataset_generate_func: Callable = None, val_collate_fn=None, dataloader_func=None +): + """Generate and return the validation data loader.""" + + data_cfg = gpc.config.data + + if not data_cfg.valid_folder: + val_ds = RandomDataset(num_samples=gpc.get_world_size(ParallelMode.DATA) * 500, max_len=data_cfg.seq_len) + else: + if dataset_generate_func is not None: + assert val_collate_fn and dataloader_func is not None + val_ds = dataset_generate_func() + else: + val_ds = get_dataset_dict(folder=data_cfg.valid_folder, split="") + + if not isinstance(val_ds, dict): + val_ds = {"val": val_ds} + + if val_collate_fn is None or not data_cfg.valid_folder: + val_collate_fn = partial(jsonl_ds_collate_fn, max_length_per_sample=data_cfg.seq_len) + + val_dls = {} + for val_name, ds in val_ds.items(): + if dataloader_func and data_cfg.valid_folder is not None: + val_dls[val_name] = dataloader_func(dataset=ds, collate_fn=val_collate_fn) + if gpc.is_rank_for_log(): + logger.info( + f"load validation dataset {val_name} with valid batch size {str(data_cfg.valid_micro_num)} and " + f"{ds.size} Byte samples." + ) + else: + # making the batch_size of validate larger can speed up the evaluation, but it should not be too large, + # otherwise too much data may be dropped + batch_size = min( + data_cfg.valid_micro_num * data_cfg.micro_bsz, len(ds) // gpc.get_world_size(ParallelMode.DATA) + ) + batch_size = batch_size // data_cfg.micro_bsz * data_cfg.micro_bsz + + if batch_size == 0 and gpc.is_rank_for_log(): + logger.info(f"skip validate {val_name}.") + continue + + val_dls[val_name] = get_dpsampler_dataloader( + ds, + shuffle=False, + num_workers=num_worker, + batch_size=batch_size, + collate_fn=val_collate_fn, + drop_last=True, + ) # drop_last=True, otherwise it may cause problems in the last batch + + if gpc.is_rank_for_log(): + logger.info( + f"load validation dataset {val_name} with valid batch size {str(batch_size)} and " + f"samples {str(len(val_dls[val_name]))}." + ) + + return val_dls + + +def load_new_batch(train_dl: DataLoader, train_iter: Iterable, train_state: TrainState): + """ + Load and return the new batch data based on training data loader. + + Args: + train_dl (torch.utils.data.DataLoader): Dataloader for training. + train_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader). + train_state (TrainState): Current training state. + + Returns: A batch data and the updated train_iter. 
+ """ + + timer("batch-gen").start() + try: + batch = next(train_iter) # structure is ({'input_ids': Tensor, 'cu_seqlens': Tensor}, Tensor) + if hasattr(train_state, "batch_sampler_iter"): + next(train_state.batch_sampler_iter) + except StopIteration: + train_iter = iter(train_dl) + batch = next(train_iter) + train_state.num_consumed_samples_in_epoch = 0 + if hasattr(train_state, "batch_sampler"): + train_state.batch_sampler_iter = iter(train_state.batch_sampler) + next(train_state.batch_sampler_iter) + timer("batch-gen").stop() + + if batch[0].get("type_ids", None) is not None: + # if use_flash_attn is False, we need to unpack type_ids + if not gpc.config.model.use_flash_attn: + batch[0]["type_ids"] = unpack_data(batch[0]["type_ids"], batch[0]["cu_seqlens"]) + + return batch, train_iter + + +def initialize_llm_profile(profiling: bool = False, start_time: str = None): + """Initialize and return the profiler context manager instance.""" + + if profiling and gpc.get_local_rank(ParallelMode.DATA) == 0 and gpc.get_local_rank(ParallelMode.TENSOR) == 0: + llm_profile = torch.profiler.profile + logger.info(f"Do profiling in rank {gpc.get_global_rank()}!") + else: + llm_profile = DummyProfile + + return llm_profile( + activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], + schedule=torch.profiler.schedule(skip_first=5, wait=1, warmup=1, active=1, repeat=1), + on_trace_ready=torch.profiler.tensorboard_trace_handler( + f"{gpc.config.JOB_NAME}/{start_time}/traces/rank{gpc.get_global_rank()}_" + + f"dp{gpc.get_local_rank(ParallelMode.DATA)}_" + + f"tp{gpc.get_local_rank(ParallelMode.TENSOR)}_" + + f"pp{gpc.get_local_rank(ParallelMode.PIPELINE)}", + ), + with_stack=True, + with_modules=True, + ) + + +def record_current_batch_training_metrics( + get_tflops_func, + logger, + writer, + success_update, + batch_count, + batch, + train_state, + optimizer, + beta2_scheduler, + trainer, + start_time, + loss, + grad_norm, + metric, + update_panel, +): + """ + Print some training metrics of current batch. 
+ """ + + set_env_var(key="LAST_ACTIVE_TIMESTAMP", value=int(time.time())) + + if success_update in (0, True): + train_state.num_consumed_tokens += batch[1].nelement() * gpc.get_world_size(ParallelMode.DATA) + if is_no_pp_or_last_stage(): + acc_perplex = metric.get_metric() + + if success_update and gpc.is_rank_for_log(): + lr = optimizer.param_groups[0]["lr"] + if hasattr(trainer.engine.optimizer, "grad_scaler"): + scaler = trainer.engine.optimizer.grad_scaler._scale.item() + elif hasattr(trainer.engine.optimizer.optim, "grad_scaler"): + scaler = trainer.engine.optimizer.optim.grad_scaler._scale.item() + + num_tokens_in_batch = batch[1].nelement() + num_samples_in_batch = sum([len(b) - 1 for b in batch[0]["cu_seqlens"]]) + max_length_in_batch = max([(b[1:] - b[:-1]).max().item() for b in batch[0]["cu_seqlens"]]) + max_samples_in_batch = max([len(b) - 1 for b in batch[0]["cu_seqlens"]]) + min_samples_in_batch = min([len(b) - 1 for b in batch[0]["cu_seqlens"]]) + + tk_per_gpu = 0 + tk_per_gpu = round( + num_tokens_in_batch + * gpc.get_world_size(ParallelMode.DATA) + / gpc.get_world_size(ParallelMode.GLOBAL) + / (time.time() - start_time), + 2, + ) + + tflops = get_tflops_func((time.time() - start_time)) + + infos = { + "tflops": tflops, + "step": batch_count, + "loss": loss.item(), + "tgs (tokens/gpu/second)": tk_per_gpu, + "lr": lr, + "loss_scale": scaler, + "grad_norm": grad_norm, + } + + infos["micro_num"] = len(batch[1]) + infos["num_consumed_tokens"] = train_state.num_consumed_tokens + infos["inf_nan_skip_batches"] = train_state.inf_nan_skip_batches + infos["num_samples_in_batch"] = num_samples_in_batch # the number of batches which have the most samples + infos["largest_length"] = max_length_in_batch # the longest input + infos["largest_batch"] = max_samples_in_batch # the batch with the most samples + infos["smallest_batch"] = min_samples_in_batch + infos["adam_beta2"] = beta2_scheduler.get_beta2() + + fwd_bwd_time = round(timer("fwd-bwd").elapsed(), 2) + infos["fwd_bwd_time"] = fwd_bwd_time + + for key, value in acc_perplex.items(): + infos[key] = value + + line = "" + for key, value in infos.items(): + line += f"{key}={value} " + writer.add_scalar(key=key, value=value, step=train_state.step_count) + + if update_panel: + logger.info( + line, + extra={ + "step": batch_count, + "lr": lr, + "num_consumed_tokens": train_state.num_consumed_tokens, + "grad_norm": grad_norm, + "loss": loss.item(), + "flops": tflops, + "tgs": tk_per_gpu, + "acc": acc_perplex["acc"], + "perplexity": acc_perplex["perplexity"], + "fwd_bwd_time": fwd_bwd_time, + }, + ) + else: + logger.info(line) + + # if loss spike occurs, send alert info to feishu + mm.monitor_loss_spike(alert_address=gpc.config.alert_address, step_count=batch_count, cur_step_loss=loss.item()) diff --git a/internlm/utils/common.py b/internlm/utils/common.py index 7c069ee..f3b58c0 100644 --- a/internlm/utils/common.py +++ b/internlm/utils/common.py @@ -34,18 +34,6 @@ def get_master_node(): return result -def get_process_rank(): - proc_rank = -1 - if os.getenv("SLURM_PROCID") is not None: - proc_rank = int(os.getenv("SLURM_PROCID")) - elif os.getenv("RANK") is not None: - # In k8s env, we use $RANK. - proc_rank = int(os.getenv("RANK")) - - # assert proc_rank != -1, "get_process_rank cant't get right process rank!" 
- return proc_rank - - def move_norm_to_cuda(norm: Union[float, torch.Tensor]) -> Union[float, torch.Tensor]: if torch.is_tensor(norm) and norm.device.type != "cuda": norm = norm.to(torch.cuda.current_device()) @@ -81,28 +69,12 @@ def move_to_device(data): data_to_return = [] for element in data: if isinstance(element, dict): - data_to_return.append( - { - k: ( - _move_tensor(v) - if k != "inference_params" - else v._replace(attention_mask=_move_tensor(v.attention_mask)) - ) - for k, v in element.items() - } - ) + data_to_return.append({k: _move_tensor(v) for k, v in element.items()}) else: data_to_return.append(_move_tensor(element)) data = data_to_return elif isinstance(data, dict): - data = { - k: ( - _move_tensor(v) - if k != "inference_params" - else v._replace(attention_mask=_move_tensor(v.attention_mask)) - ) - for k, v in data.items() - } + data = {k: _move_tensor(v) for k, v in data.items()} else: raise TypeError(f"Expected batch data to be of type torch.Tensor, list, tuple, or dict, but got {type(data)}") return data @@ -246,3 +218,21 @@ def get_megatron_flops( tflops = flops_per_iteration / (elapsed_time_per_iter * global_world_size * (10**12)) return tflops + + +class DummyProfile: + """ + Dummy Profile. + """ + + def __init__(self, *args, **kwargs) -> None: + pass + + def __enter__(self): + return self + + def __exit__(self, a, b, c): + pass + + def step(self): + pass diff --git a/internlm/utils/evaluation.py b/internlm/utils/evaluation.py new file mode 100644 index 0000000..f1b2a20 --- /dev/null +++ b/internlm/utils/evaluation.py @@ -0,0 +1,168 @@ +from contextlib import contextmanager + +import torch +import torch.distributed as dist +from tqdm import tqdm + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.core.scheduler import SchedulerMetricHook +from internlm.model.metrics import AccPerplex + + +@contextmanager +def switch_evaluation_no_pipeline_scheduler(trainer, grad_accum_size, grad_accum_batch_size, metric_hook_list): + if not gpc.is_using_pp(): + prev_data_process_func = trainer.schedule.data_process_func + prev_grad_accum_size = trainer.schedule._grad_accum_size + prev_grad_accum_batch_size = trainer.schedule._grad_accum_batch_size + prev_metric_hooks = trainer.schedule._hooks + try: + trainer.schedule.data_process_func = None + trainer.schedule._grad_accum_size = grad_accum_size + trainer.schedule._grad_accum_batch_size = grad_accum_batch_size + trainer.schedule._hooks = metric_hook_list + yield + finally: + trainer.schedule.data_process_func = prev_data_process_func + trainer.schedule._grad_accum_size = prev_grad_accum_size + trainer.schedule._grad_accum_batch_size = prev_grad_accum_batch_size + trainer.schedule._hooks = prev_metric_hooks + + +@contextmanager +def switch_evaluation_pipeline_scheduler(trainer, num_microbatches, tensor_shape, metric_hook_list): + if gpc.is_using_pp(): + pre_data_process_func = trainer.schedule.data_process_func + prev_num_microbatches = trainer.schedule.num_microbatches + prev_tensor_shape = trainer.schedule.tensor_shape + prev_metric_hooks = trainer.schedule._hooks + try: + trainer.schedule.data_process_func = None + trainer.schedule.num_microbatches = num_microbatches + trainer.schedule.tensor_shape = tensor_shape + trainer.schedule._hooks = metric_hook_list + yield + finally: + trainer.schedule.data_process_func = pre_data_process_func + trainer.schedule.num_microbatches = prev_num_microbatches + trainer.schedule.tensor_shape = prev_tensor_shape + 
trainer.schedule._hooks = prev_metric_hooks + + +@contextmanager +def switch_sequence_parallel_mode(): + prev_mode = gpc.config.parallel.sequence_parallel + try: + gpc.config.parallel.sequence_parallel = False + yield + finally: + gpc.config.parallel.sequence_parallel = prev_mode + + +def evaluate_on_val_dls( + trainer, + val_dls, + writer, + logger, + step_count, + update_panel: bool = False, + streaming: bool = False, +): + with switch_sequence_parallel_mode(): + torch.cuda.empty_cache() + trainer.eval() + verbose = gpc.is_rank_for_log() + data_cfg = gpc.config.data + + for val_name, val_dl in val_dls.items(): + if len(val_dl) == 0 and verbose and not streaming: + logger.info(f"Validation dataset: {val_name} is empty") + continue + + val_metric = AccPerplex( + device=torch.cuda.current_device(), + tp_pg=gpc.get_group(ParallelMode.TENSOR), + dp_pg=gpc.get_group(ParallelMode.DATA), + ) + val_sche_metric_hook = SchedulerMetricHook(metric=val_metric) + + val_loss = 0 + val_idx = -1 + for val_idx, batch in tqdm( + enumerate(val_dl), + desc="Val.", + total=len(val_dl) if not streaming else None, + position=1, + disable=not verbose, + leave=False, + ): + with torch.inference_mode(): + if gpc.is_using_pp(): + total_val_bsz = len(batch[1]) + assert total_val_bsz % data_cfg.micro_bsz == 0 + num_microbatches = total_val_bsz // data_cfg.micro_bsz + tensor_shape = torch.Size( + [data_cfg.micro_bsz, batch[0]["input_ids"].shape[1], gpc.config.HIDDEN_SIZE] + ) + + with switch_evaluation_pipeline_scheduler( + trainer=trainer, + num_microbatches=num_microbatches, + tensor_shape=tensor_shape, + metric_hook_list=[val_sche_metric_hook], + ): + _, _, loss = trainer.execute_schedule( + batch, forward_only=True, return_loss=True, return_output_label=False + ) + else: + total_val_bsz = len(batch[1]) + assert total_val_bsz % data_cfg.micro_bsz == 0 + grad_accum_size = total_val_bsz // data_cfg.micro_bsz + grad_accum_batch_size = data_cfg.micro_bsz + with switch_evaluation_no_pipeline_scheduler( + trainer=trainer, + grad_accum_size=grad_accum_size, + grad_accum_batch_size=grad_accum_batch_size, + metric_hook_list=[val_sche_metric_hook], + ): + _, _, loss = trainer.execute_schedule( + batch, forward_only=True, return_loss=True, return_output_label=False + ) + if verbose: + val_loss += loss.item() + + assert val_idx != -1 + dist.barrier() + + val_res = val_metric.get_metric() + if verbose and len(val_dl) != 0: + val_loss = val_loss / (val_idx + 1 + 1e-6) + infos = { + "step": step_count, + f"val/{val_name}_loss": val_loss, + f"val/{val_name}_acc": val_res["acc"], + f"val/{val_name}_plex": val_res["perplexity"], + } + + for key, value in infos.items(): + writer.add_scalar(key=key, value=value, step=step_count) + + if update_panel: + logger.info( + f"Validation on {val_name}: " + " ".join([f"{key}={value}" for key, value in infos.items()]), + extra={ + "step": step_count, + "val_loss": val_loss, + "val_acc": val_res["acc"], + "val_perplexity": val_res["perplexity"], + }, + ) + else: + logger.info( + f"Validation on {val_name}: " + " ".join([f"{key}={value}" for key, value in infos.items()]) + ) + + trainer.train() + torch.cuda.empty_cache() + dist.barrier() diff --git a/internlm/utils/logger.py b/internlm/utils/logger.py index a4a9f03..679913a 100644 --- a/internlm/utils/logger.py +++ b/internlm/utils/logger.py @@ -2,6 +2,7 @@ # -*- encoding: utf-8 -*- import logging +import os LOGGER_NAME = "internlm" LOGGER_FORMAT = "%(asctime)s\t%(levelname)s %(filename)s:%(lineno)s in %(funcName)s -- %(message)s" @@ -11,6 +12,8 @@ 
LOGGER_LEVEL_HELP = ( "The logging level threshold, choices=['debug', 'info', 'warning', 'error', 'critical'], default='info'" ) +uniscale_logger = None + def get_logger(logger_name: str = LOGGER_NAME, logging_level: str = LOGGER_LEVEL) -> logging.Logger: """Configure the logger that is used for uniscale framework. @@ -24,6 +27,10 @@ def get_logger(logger_name: str = LOGGER_NAME, logging_level: str = LOGGER_LEVEL logger (logging.Logger): the created or modified logger. """ + + if uniscale_logger is not None: + return uniscale_logger + logger = logging.getLogger(logger_name) if logging_level not in LOGGER_LEVEL_CHOICES: @@ -39,3 +46,53 @@ def get_logger(logger_name: str = LOGGER_NAME, logging_level: str = LOGGER_LEVEL logger.addHandler(handler) return logger + + +def initialize_uniscale_logger( + job_name: str = None, + launch_time: str = None, + file_name: str = None, + name: str = LOGGER_NAME, + level: str = LOGGER_LEVEL, + file_path: str = None, + is_std: bool = True, +): + """ + Initialize uniscale logger. + + Args: + job_name (str): The name of the training job, defaults to None. + launch_time (str): The launch time of the training job, defaults to None. + file_name (str): The log file name, defaults to None. + name (str): The logger name, defaults to "internlm". + level (str): The log level, defaults to "info". + file_path (str): The log file path, defaults to None. + is_std (bool): Whether to output to console, defaults to True. + + Returns: + Uniscale logger instance. + """ + + try: + from uniscale_monitoring import get_logger as get_uniscale_logger + except ImportError: + print("Failed to import module uniscale_monitoring. Use default python logger.") + return None + + if not file_path: + assert ( + job_name and launch_time and file_name + ), "If file_path is None, job_name, launch_time and file_name must be set." 
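# A small sketch of the path this branch builds, using hypothetical values for job_name,
# launch_time and file_name:
#   initialize_uniscale_logger(job_name="my_job", launch_time="20230824_2203", file_name="rank0.log")
#   -> file_path == "my_job/20230824_2203/logs/rank0.log"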
+ log_file_name = file_name + log_folder = os.path.join(job_name, launch_time, "logs") + log_dir = os.path.join(log_folder, log_file_name) + file_path = log_dir + + logger = get_uniscale_logger(name=name, level=level, filename=file_path, is_std=is_std) + if isinstance(logger, (list, tuple)): + logger = list(logger)[0] + + global uniscale_logger + uniscale_logger = logger + + return logger diff --git a/internlm/utils/megatron_timers.py b/internlm/utils/megatron_timers.py index 6c4ed11..e319a80 100644 --- a/internlm/utils/megatron_timers.py +++ b/internlm/utils/megatron_timers.py @@ -14,18 +14,19 @@ class _Timer: self.elapsed_ = 0.0 self.started_ = False self.start_time = time.time() + self.stream = torch.cuda.current_stream() def start(self): """Start the timer.""" assert not self.started_, "timer has already been started" - torch.cuda.synchronize() + self.stream.synchronize() self.start_time = time.time() self.started_ = True def stop(self): """Stop the timer.""" assert self.started_, "timer is not started" - torch.cuda.synchronize() + self.stream.synchronize() self.elapsed_ += time.time() - self.start_time self.started_ = False diff --git a/internlm/utils/model_checkpoint.py b/internlm/utils/model_checkpoint.py index 687399e..09bafa5 100644 --- a/internlm/utils/model_checkpoint.py +++ b/internlm/utils/model_checkpoint.py @@ -2,8 +2,11 @@ # -*- encoding: utf-8 -*- import copy +import fcntl import os +import socket import time +from enum import Enum from typing import Dict import torch @@ -11,15 +14,26 @@ import torch from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc from internlm.core.trainer import TrainState +from internlm.monitor import send_alert_message from internlm.solver.optimizer import HybridZeroOptimizer from internlm.utils.common import get_current_device from internlm.utils.logger import get_logger from internlm.utils.megatron_timers import megatron_timer as timer -from internlm.utils.storage_manager import get_fns, llm_load, llm_save +from internlm.utils.storage_manager import ( + get_fns, + get_storage_manager, + llm_load, + llm_save, +) logger = get_logger(__file__) +class CheckpointType(Enum): + NORMAL_CHECKPOINT = 1 + SNAPSHOT_CHECKPOINT = 2 + + def get_model_topology(model): """ Returns: @@ -138,11 +152,13 @@ def save_optimizer_checkpoint(optim, state_path): zero_rank = gpc.get_local_rank(ParallelMode.ZERO1) tp_rank = gpc.get_local_rank(ParallelMode.TENSOR) pp_rank = gpc.get_local_rank(ParallelMode.PIPELINE) + tp_size = gpc.get_world_size(ParallelMode.TENSOR) + pp_size = gpc.get_world_size(ParallelMode.PIPELINE) fp = f"optimizer_tp{tp_rank}_pp{pp_rank}_zo{zero_rank}.pt" states = optim.state_dict() if isinstance(optim, HybridZeroOptimizer): - if gpc.get_global_rank() < optim.zero_world_size: + if gpc.get_global_rank() < optim.zero_world_size * tp_size * pp_size: llm_save(os.path.join(state_path, fp), states) if "zero_devide_optim_plan" in states: params_per_rank_id_dict = states.pop("zero_devide_optim_plan") @@ -152,44 +168,6 @@ def save_optimizer_checkpoint(optim, state_path): llm_save(os.path.join(state_path, fp), states) -def save_checkpoint(folder, model, optimizer, scheduler, train_state: TrainState, model_config: Dict = None): - """ - Save checkpoint to the given folder path. 
- """ - - start = time.time() - torch.distributed.barrier() - folder = os.path.join(folder, str(train_state.step_count)) - logger.info( - f"Saving checkpoint to `{folder}` at batch count:{train_state.step_count} from rank:{gpc.get_global_rank()}..." - ) - - timer("save-model").start() - save_model_checkpoint(folder=folder, model=model) - timer("save-model").stop() - - timer("save-optimizer").start() - save_optimizer_checkpoint(optim=optimizer, state_path=folder) - timer("save-optimizer").stop() - - if gpc.is_rank_for_log(): - scheduler_states = scheduler.state_dict() - llm_save(os.path.join(folder, "schedulder.pt"), saved_obj=scheduler_states) - - sampler_state = train_state.batch_sampler.state_dict() - llm_save(os.path.join(folder, "sampler.pt"), saved_obj=sampler_state) - llm_save(os.path.join(folder, "context.pt"), saved_obj=train_state.state_dict()) - - if model_config is not None: - llm_save(os.path.join(folder, "model_config.pt"), saved_obj=model_config) - - torch.distributed.barrier() - - if gpc.is_rank_for_log(): - timer.log(["save-model", "save-optimizer"], logger=logger) - logger.info(f"Step: {train_state.step_count}, rank 0 save ckpt use {time.time() - start:.3f} s") - - def load_optimizer_checkpoint(folder, optim): """Load the optimizer state from the local file system or remote object storage Service (OSS). @@ -287,3 +265,369 @@ def load_scheduler(ckpt_path: str, lr_scheduler, optimizer, learning_rate, train if gpc.is_rank_for_log(): logger.info(f"reload load_scheduler:{lr_scheduler}") + + +class CheckpointManager: + """StorageManagerContext""" + + def __init__(self, ckpt_config, model, model_config=None, model_config_file=None, feishu_address=None) -> None: + """ + CheckpointManager is used to decide when to store ckpt. If it is an asynchronous + upload mode, you must call wait_async_upload_finish at the end of the program to wait + for the asynchronous ckpt upload to complete. + + Args: + ckpt_config (dict): model checkpoint config. + model (nn.module): model obj + optimizer (object): optimzier obj. + lr_scheduler (object): lr_scheduler obj. + model_config (dict): model config. + """ + self.enable_save_ckpt = ckpt_config.enable_save_ckpt + self.checkpoint_every = ckpt_config.checkpoint_every + self.save_ckpt_folder = ckpt_config.save_ckpt_folder + self.snapshot_ckpt_folder = ckpt_config.snapshot_ckpt_folder + self.oss_snapshot_freq: int = ckpt_config.oss_snapshot_freq + self.stop_file_path = ckpt_config.stop_file_path + self.load_model_only_folder = ckpt_config.load_model_only_folder + self.feishu_address = feishu_address + self.storage_manager = get_storage_manager() + self.snapshot_counter = 0 + self.load_optimizer = gpc.config.ckpt.load_optimizer + + self.model = model + self.model_config = model_config + self.model_config_file = model_config_file + + if self.stop_file_path and gpc.get_global_rank() == 0: + dir_path = os.path.dirname(self.stop_file_path) + if dir_path != "" and not os.path.exists(dir_path): + os.makedirs(dir_path) + with open(self.stop_file_path, "w", encoding="utf-8") as f: + f.write("0") + + if ckpt_config.load_given_ckpt is False: + # Priority: load_given_ckpt(True) > latest_checkpoint > load_model_only_folder + latest_ckpt_path = self.query_lastest_ckpt() + if latest_ckpt_path: + self.load_ckpt_folder = latest_ckpt_path + else: + # At this time, we have to load model init weights and train from step 0. 
+ self.load_ckpt_folder = self.load_model_only_folder + else: + self.load_ckpt_folder = ckpt_config.load_ckpt_folder + + if gpc.is_rank_for_log(): + logger.info(f"load_ckpt_folder will be set to '{self.load_ckpt_folder}'") + if self.stop_file_path is None: + logger.warning("stop_file_path is not set, so quit_signal_handler is disabled") + + def quit_signal_handler(self, train_state) -> bool: + """ + Exit signal detection function. If we write an exit step into the stop file given by 'stop_file_path', + all ranks will save ckpt and exit. + A negative integer step means save ckpt. + A positive integer step means save ckpt and quit. + + Args: + train_state (TrainState): current training state. + Returns: + tuple: (now_break, now_save_ckpt, save_type). + """ + now_break, now_save_ckpt, save_type = False, False, CheckpointType.NORMAL_CHECKPOINT + + if self.stop_file_path is None: + return now_break, now_save_ckpt, save_type + + with open(self.stop_file_path, "a+", encoding="utf-8") as f: + fcntl.flock(f, fcntl.LOCK_EX) + f.seek(0) + msg = f.read() + fcntl.flock(f, fcntl.LOCK_UN) + action_step = int(msg) + + if action_step < 0 and abs(action_step) == train_state.step_count: + now_save_ckpt = True + + if action_step > 0 and action_step == train_state.step_count: + now_break, now_save_ckpt = True, True + + if action_step != 0 and gpc.is_rank_for_log(): + msg = "Stop" if action_step > 0 else "Save" + action_step = abs(action_step) + if train_state.step_count <= action_step: + if self.feishu_address: + send_alert_message( + address=self.feishu_address, + message=f"training will {msg} at step_count {action_step}!\ +now step_count is {train_state.step_count}", + ) + + return now_break, now_save_ckpt, save_type + + def try_save_checkpoint(self, train_state): + if not self.enable_save_ckpt: + return False + + save_ckpts, save_type = False, CheckpointType.NORMAL_CHECKPOINT + if self.oss_snapshot_freq > 1 and train_state.step_count % self.oss_snapshot_freq == 0: + save_ckpts, save_type = True, CheckpointType.SNAPSHOT_CHECKPOINT + if train_state.step_count % self.checkpoint_every == 0: + save_ckpts, save_type = True, CheckpointType.NORMAL_CHECKPOINT + now_break, singal_save_ckpts, singal_save_type = self.quit_signal_handler(train_state) + if save_ckpts is False: + save_ckpts = singal_save_ckpts + save_type = singal_save_type + + if save_ckpts: + # Wait for the previous round of asynchronous upload storage to complete. + self.storage_manager.wait() + if save_type == CheckpointType.SNAPSHOT_CHECKPOINT: + # Snapshot number, with only two snapshots written alternately. + self.snapshot_counter = (self.snapshot_counter + 1) % 2 + save_ckpt_folder = os.path.join(self.snapshot_ckpt_folder, f"{self.snapshot_counter}") + else: + save_ckpt_folder = os.path.join(self.save_ckpt_folder, str(train_state.step_count)) + + self.save_checkpoint( + folder=save_ckpt_folder, + model=self.model, + optimizer=self.optimizer, + scheduler=self.lr_scheduler, + train_state=train_state, + model_config=self.model_config, + model_config_file=self.model_config_file, + ) + + return now_break + + def wait_async_upload_finish(self): + """wait for all checkpoint uploads to be completed""" + self.storage_manager.wait() + torch.distributed.barrier() + + def query_latest_snapshot_step_boto3(self): + """query_latest_snapshot_step_boto3 + Returns: + Tuple(str, int): path of the latest ckpt and its step; if not found, (None, None) is returned. 
+ """ + ckpt_list = self.storage_manager.get_fns(self.save_ckpt_folder) + if len(ckpt_list) == 0: + return None, None + + max_normal_step = 0 + ckpt_list = list(map(lambda a: int(a.strip("/")) if a.strip("/").isdigit() else 0, ckpt_list)) + ckpt_list.sort(reverse=True) + for ckpt in ckpt_list: + fns_list = self.storage_manager.get_fns(os.path.join(self.save_ckpt_folder, str(ckpt))) + for fn in fns_list: + if fn.endswith(".step"): + max_normal_step = ckpt + break + if max_normal_step != 0: + break + + max_normal_step = ckpt_list[0] + load_normal_ckpt_path = os.path.join(self.save_ckpt_folder, str(max_normal_step)) + + snapshot_path_0 = os.path.join(self.save_ckpt_folder, "snapshot", "0") + snapshot_path_1 = os.path.join(self.save_ckpt_folder, "snapshot", "1") + ckpt_list_1 = self.storage_manager.get_fns(snapshot_path_0) + ckpt_list_2 = self.storage_manager.get_fns(snapshot_path_1) + max_step_0, max_step_1 = 0, 0 + for ckpt in ckpt_list_1: + ckpt = ckpt.strip("/") + if ckpt.endswith(".step"): + max_step_0 = max(max_step_0, int(ckpt.split(".")[0])) + for ckpt in ckpt_list_2: + ckpt = ckpt.strip("/") + if ckpt.endswith(".step"): + max_step_1 = max(max_step_1, int(ckpt.split(".")[0])) + + snap_load_path = snapshot_path_0 if max_step_0 > max_step_1 else snapshot_path_1 + snap_step = max(max_step_0, max_step_1) + load_path = snap_load_path if snap_step > max_normal_step else load_normal_ckpt_path + load_step = max(snap_step, max_normal_step) + return load_path, load_step + + def query_latest_snapshot_step_local(self): + max_step, max_step_path = 0, None + for root, _, files in os.walk(self.save_ckpt_folder, followlinks=True): + for fn in files: + fn = fn.strip("/") + if fn.endswith(".step"): + # We assume that both normal ckpt and snapshot ckpt will store the '.step' file + # as an integrity flag. + step = int(fn.rsplit(".", maxsplit=1)[0]) + if max_step < step: + max_step = step + max_step_path = root + + return max_step_path, max_step + + def query_lastest_ckpt(self): + latest_checkpoint = None + # Training was automatically restarted by the process, forcing the latest snapshot to be read. 
+ if self.save_ckpt_folder: + if self.save_ckpt_folder.startswith("boto3"): + latest_checkpoint, step = self.query_latest_snapshot_step_boto3() + elif self.save_ckpt_folder.startswith("local"): + latest_checkpoint, step = self.query_latest_snapshot_step_local() + else: + latest_checkpoint, step = None, 0 + + if latest_checkpoint is not None: + if gpc.is_rank_for_log(): + logger.info(f"Found latest ckpt: {latest_checkpoint}, step: {step}") + send_alert_message( + address=self.feishu_address, + message=f"Auto restart resume from ckpt-path: '{latest_checkpoint}', step : {step}", + ) + else: + if gpc.is_rank_for_log(): + send_alert_message( + address=self.feishu_address, + message=f"Can't find snapshot checkpoint, use default load-ckpt path: {latest_checkpoint}", + ) + + return latest_checkpoint + + def try_load_model(self, current_time=""): + model_load_path = None + + if self.load_ckpt_folder and self.load_model_only_folder: + raise ValueError( + "Error: both load_ckpt_folder and load_model_only_folder are set. \ +If you only need to load model weights (for example, starting an SFT task for the first time), \ +set load_model_only_folder; if you need to resume training from a ckpt, \ +set load_ckpt_folder or keep its default value \ +(with the default value, internlm will try to load the latest ckpt from save_ckpt_folder)" + ) + + if self.load_ckpt_folder: + if gpc.is_rank_for_log(): + logger.info( + f"===========Resume training from `{self.load_ckpt_folder}` {current_time} on host:" + f"{socket.gethostname()}===========" + ) + model_load_path = self.load_ckpt_folder + elif self.load_model_only_folder: + if gpc.is_rank_for_log(): + logger.info( + f"===========Load Model from `{self.load_model_only_folder}` {current_time} on host:" + f"{socket.gethostname()}===========" + ) + model_load_path = self.load_model_only_folder + else: + if gpc.is_rank_for_log(): + logger.info( + f"===========New Run {current_time} on host:{socket.gethostname()},rank={gpc.get_global_rank()}," + f"tp={gpc.get_local_rank(ParallelMode.TENSOR)},pp={gpc.get_local_rank(ParallelMode.PIPELINE)}," + f"dp={gpc.get_local_rank(ParallelMode.DATA)}===========" + ) + + # Loading model weights must be done before zero is initialized. + if model_load_path is not None: + load_model_checkpoint(folder=model_load_path, model=self.model) + + def try_resume_training(self, lr_scheduler, optimizer, lr, train_state, train_dl): + """Attempt to restore the training state of the last ckpt. + + Args: + lr_scheduler (_LRScheduler): lr_scheduler object. + optimizer (Optimizer): optimizer object. + lr (float): learning rate. + train_state (dict): training states. + train_dl (DataLoader): training dataloader object. + """ + if self.load_ckpt_folder is not None: + # load optimizer states. + if self.load_optimizer: + load_optimizer_checkpoint(self.load_ckpt_folder, optimizer) + # load lr scheduler states. + load_scheduler(self.load_ckpt_folder, lr_scheduler, optimizer, lr, train_state) + # load training states. + load_context(self.load_ckpt_folder, train_dl, train_state) + # load dataloader sampler states. 
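# (Presumably the isinstance check below excludes torch's plain BatchSampler because it carries no
# state_dict()/load_state_dict() to restore; only a stateful sampler such as StaticBatchSampler,
# used for the packed training data, has resumable state.)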
+ if hasattr(train_state, "batch_sampler") and not isinstance( + train_state.batch_sampler, torch.utils.data.sampler.BatchSampler + ): + load_sampler(self.load_ckpt_folder, train_dl.batch_sampler) + if hasattr(train_state, "data_state_dict"): + train_dl.dataset.load_state_dict( + llm_load(os.path.join(self.load_ckpt_folder, "sampler_0.pt")), ckpt_path=self.load_ckpt_folder + ) + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + + def save_checkpoint( + self, + folder, + model, + optimizer, + scheduler, + train_state: TrainState, + model_config: Dict = None, + model_config_file: str = None, + ): + """ + Save checkpoint to the given folder path. + """ + + start = time.time() + self.set_save_folder(folder, train_state.step_count) + torch.cuda.synchronize() + torch.distributed.barrier() + if gpc.is_rank_for_log(): + logger.info(f"Saving checkpoint to `{folder}` at batch count:{train_state.step_count}...") + + timer("save-model").start() + save_model_checkpoint(folder=folder, model=model) + timer("save-model").stop() + + timer("save-optimizer").start() + save_optimizer_checkpoint(optim=optimizer, state_path=folder) + timer("save-optimizer").stop() + + if ( + hasattr(train_state, "data_state_dict") + and gpc.get_local_rank(ParallelMode.TENSOR) == 0 + and gpc.get_local_rank(ParallelMode.PIPELINE) == 0 + ): + llm_save( + os.path.join(folder, f"sampler_{gpc.get_local_rank(ParallelMode.DATA)}.pt"), + saved_obj=train_state.data_state_dict, + ) + + if gpc.is_rank_for_log(): + scheduler_states = scheduler.state_dict() + llm_save(os.path.join(folder, "schedulder.pt"), saved_obj=scheduler_states) + if hasattr(train_state, "batch_sampler") and not isinstance( + train_state.batch_sampler, torch.utils.data.sampler.BatchSampler + ): + sampler_state = train_state.batch_sampler.state_dict() + llm_save(os.path.join(folder, "sampler.pt"), saved_obj=sampler_state) + llm_save(os.path.join(folder, "context.pt"), saved_obj=train_state.state_dict()) + + if model_config is not None: + # Model configuration dictionary. + llm_save(os.path.join(folder, "model_config.pt"), saved_obj=model_config) + + if model_config_file is not None: + # The complete training config file content, stored in binary format. 
+ llm_save(os.path.join(folder, "config_file.pt"), saved_obj=model_config_file) + + torch.distributed.barrier() + + if gpc.is_rank_for_log(): + timer.log(["save-model", "save-optimizer"], logger=logger) + logger.info(f"Step: {train_state.step_count}, rank 0 save ckpt use {time.time() - start:.3f} s") + if self.storage_manager.async_mode is False: + llm_save( + os.path.join(folder, f"{train_state.step_count}.step"), + saved_obj=dict({"step": train_state.step_count}), + ) + + def set_save_folder(self, folder, step): + self.storage_manager.latest_save_folder = folder + self.storage_manager.latest_save_step = step diff --git a/internlm/utils/parallel.py b/internlm/utils/parallel.py index 87ea3a6..cffcdc1 100644 --- a/internlm/utils/parallel.py +++ b/internlm/utils/parallel.py @@ -46,3 +46,16 @@ def sync_model_param_within_tp(model): def is_no_pp_or_last_stage(): return not gpc.is_initialized(ParallelMode.PIPELINE) or gpc.is_last_rank(ParallelMode.PIPELINE) + + +def get_parallel_log_file_name(): + if gpc.is_rank_for_log(): + fn_prefix = "main_" # Indicates a rank with more output information + else: + fn_prefix = "" + + log_file_name = ( + f"{fn_prefix}dp={gpc.get_local_rank(ParallelMode.DATA)}_" + f"tp={gpc.get_local_rank(ParallelMode.TENSOR)}_pp={gpc.get_local_rank(ParallelMode.PIPELINE)}" + ) + return log_file_name diff --git a/internlm/utils/registry.py b/internlm/utils/registry.py index f2da5a5..7cbfcc5 100644 --- a/internlm/utils/registry.py +++ b/internlm/utils/registry.py @@ -22,9 +22,9 @@ class Registry: """Registers a module represented in `module_class`. Args: - module_class (class): The module to be registered. + module_name (str): The name of module to be registered. Returns: - class: The module to be registered, so as to use it normally if via importing. + function: The module to be registered, so as to use it normally if via importing. Raises: AssertionError: Raises an AssertionError if the module has already been registered before. """ diff --git a/internlm/utils/simple_memory_profiler.py b/internlm/utils/simple_memory_profiler.py index 4ca6679..9caf0a2 100644 --- a/internlm/utils/simple_memory_profiler.py +++ b/internlm/utils/simple_memory_profiler.py @@ -1,15 +1,13 @@ import os import time from collections import OrderedDict -from functools import partial +from functools import partial, reduce from typing import Any, Dict, List, Tuple import pyecharts import torch -from internlm.core.context import ParallelMode -from internlm.core.context import global_context as gpc -from internlm.solver.pipeline_utils import partition_uniform +from internlm.core.naive_amp import NaiveAMPModel mb = 1024 * 1024 @@ -107,6 +105,8 @@ class SimpleMemState: """ Update the total memory usage of the model and sub-models. """ + self._total_mem = self._layer_mem + for stat in self.sub_model_stats.values(): # Update sub-model status first. 
stat.update_total_memory() @@ -169,6 +169,39 @@ class SimpleMemState: return {"name": self.layer_name, "children": children} +class ActivationMemState: + """ + Activation Memory State + """ + + def __init__(self, num_chunks: int) -> None: + self._num_chunks = num_chunks + + self.inited: List[bool] = [False for _ in range(num_chunks)] + self.states: List[SimpleMemState] = [SimpleMemState(f"activations_{idx}") for idx in range(num_chunks)] + + @property + def total_mem(self) -> int: + return sum(state.total_mem for state in self.states) + + def dump(self, prefix: str = "") -> str: + return reduce(lambda x, y: x + y, [state.dump(prefix) for state in self.states]) + + def to_json(self, base: int = 1024 * 1024) -> List: + return [state.to_json(base) for state in self.states] + + +def _unpack_naive_wrapper(model: torch.nn.Module) -> Tuple[torch.nn.Module, int]: + num_chunks = len(model) if isinstance(model, torch.nn.ModuleList) else 1 + + if num_chunks > 1: + model = torch.nn.ModuleList([_model.model if isinstance(_model, NaiveAMPModel) else _model for _model in model]) + else: + model = model.model if isinstance(model, NaiveAMPModel) else model + + return model, num_chunks + + class SimpleMemoryProfiler: """ A memory profiler for a llm model. @@ -177,7 +210,7 @@ class SimpleMemoryProfiler: model (torch.nn.Module): The model to profile. optimizer (torch.optim.Optimizer): The optimizer used for training the model. log_file (str): The file to write the memory state information to. - activation_config (List[str], optional): The list of activation layers to track. Defaults to None. + total_steps: number of steps to trace. """ def __init__( @@ -186,9 +219,8 @@ class SimpleMemoryProfiler: optimizer: torch.optim.Optimizer, log_folder: str, total_steps: int = 5, - activation_config: List[str] = None, ): - self._model = model + self._model, self._num_model_chunks = _unpack_naive_wrapper(model) self._optimizer = optimizer self._log_folder = log_folder self._remaining_steps = total_steps @@ -197,17 +229,20 @@ class SimpleMemoryProfiler: self._record_start_time = time.time() # For activation memory state. 
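# Sketch of the new per-chunk bookkeeping (assuming num_chunks=2, i.e. an interleaved pipeline model):
#   self._activation_base_mems.states == [SimpleMemState("activations_0"), SimpleMemState("activations_1")]
# Each chunk's first forward pass fills its own SimpleMemState once, while _activation_mem and
# _activation_mem_max track the running and peak activation usage across all chunks.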
- self._activation_config = activation_config - self._activation_mem_inited: bool = False + self._activation_mem: int = 0 - self._activation_max_count = 0 - self._activation_base_mem: SimpleMemState = SimpleMemState("activations") + self._activation_mem_max: int = 0 + self._activation_base_mems = ActivationMemState(self._num_model_chunks) # Check or create log folder os.makedirs(self._log_folder, exist_ok=True) # Register activation memory tracking hooks - self._register_activation_trace_hooks() + if self._num_model_chunks > 1: + for chunk_id in range(self._num_model_chunks): + self._register_activation_trace_hooks(chunk_id, self._model[chunk_id]) + else: + self._register_activation_trace_hooks(0, self._model) # Calculate static parameter cuda memory self._param_mem_state = SimpleMemState("param_mem") @@ -221,7 +256,7 @@ class SimpleMemoryProfiler: self._calc_tensor_group_memory(self._os_params_mem_state, list(enumerate(self._optimizer.param_groups))) # Generate the first memory record - self.point(create=True) + self.point(with_options="params,grads,os_params", create=True) def point(self, with_options: str = "", create: bool = False) -> None: """ @@ -272,7 +307,7 @@ class SimpleMemoryProfiler: if "os_state" in options: layout_info += "os_state_layout:\n" + self._os_state_mem_state.dump() if "activation_base" in options: - layout_info += "activation_base_layout:\n" + self._activation_base_mem.dump() + layout_info += "activation_base_layout:\n" + self._activation_base_mems.dump() # Write memory state information to log file file_mode = "w" if create else "a" @@ -315,14 +350,14 @@ class SimpleMemoryProfiler: [self._os_params_mem_state.to_json(), self._os_state_mem_state.to_json()], "os_memory_sunburst", ) - self._render_sunburst_chart(self._activation_base_mem.to_json()["children"], "activation_memory_sunburst") + self._render_sunburst_chart(self._activation_base_mems.to_json(), "activation_memory_sunburst") # Generate summary sunburst chart summary_sunburst_data = [ {"name": "params", "value": self._param_mem_state.total_mem // mb}, {"name": "grads", "value": self._grad_mem_state.total_mem // mb}, {"name": "os_params", "value": self._os_params_mem_state.total_mem // mb}, {"name": "os_state", "value": self._os_state_mem_state.total_mem // mb}, - {"name": "activation", "value": self._activation_base_mem.total_mem // mb}, + {"name": "activation", "value": self._activation_mem_max // mb}, ] self._render_sunburst_chart(summary_sunburst_data, "summary_sunburst") @@ -337,12 +372,13 @@ class SimpleMemoryProfiler: {}, { "r0": "10%", - "r": "40%", + "r": "35%", "itemStyle": {"borderWidth": 3}, "label": {"align": "left"}, }, - {"r0": "40%", "r": "65%", "label": {"align": "left"}}, - {"r0": "65%", "r": "80%", "label": {"align": "left"}}, + {"r0": "35%", "r": "55%", "label": {"align": "left"}}, + {"r0": "55%", "r": "70%", "label": {"align": "left"}}, + {"r0": "70%", "r": "80%", "label": {"align": "left"}}, {"r0": "80%", "r": "90%", "label": {"align": "left"}}, { "r0": "90%", @@ -357,7 +393,14 @@ class SimpleMemoryProfiler: f"{self._log_folder}/{name}.html" ) - def _inner_activation_trace_hook(self, layer_name: str, model: Any, inputs: Any, output: torch.Tensor) -> None: + def _inner_activation_trace_hook( + self, + chunk_id: int, + layer_name: str, + model: Any, + inputs: Any, + output: torch.Tensor, + ) -> None: """ Hook function to trace the activation memory usage for a inner layer. 
@@ -373,13 +416,15 @@ class SimpleMemoryProfiler: del model, inputs assert isinstance(output, torch.Tensor), f"Invalid output type: {type(output)}" - if self._stoped or self._activation_mem_inited: + if self._stoped or self._activation_base_mems.inited[chunk_id]: return # Delay updating the total_mem of activation_base_mem here, it will be handled in the forward ending hook. - self._activation_base_mem.add(layer_name, output.element_size() * output.nelement(), flush=False) + self._activation_base_mems.states[chunk_id].add( + layer_name, output.element_size() * output.nelement(), flush=False + ) - def _activation_trace_hook_forward(self, model: Any, inputs: Any, output: torch.Tensor) -> None: + def _activation_trace_hook_forward(self, chunk_id: int, model: Any, inputs: Any, output: torch.Tensor) -> None: """ Hook function to trace the activation memory usage for a forward pass. @@ -398,23 +443,24 @@ class SimpleMemoryProfiler: return # Check if the activation memory has been initialized - if self._activation_mem_inited is False: + if self._activation_base_mems.inited[chunk_id] is False: + self._activation_base_mems.inited[chunk_id] = True # Update the total memory of the activation base memory state - self._activation_base_mem.update_total_memory() + self._activation_base_mems.states[chunk_id].update_total_memory() # Set with_options to "activation_base" to include activation_base_layout in the memory dump - self._activation_mem_inited = True + with_options = "activation_base" + else: + with_options = "" # Accumulate activation memory usage for each forward pass - self._activation_mem += self._activation_base_mem.total_mem - - # Update activation max count - if self._activation_mem // self._activation_base_mem.total_mem > self._activation_max_count: - self._activation_max_count = self._activation_mem // self._activation_base_mem.total_mem + self._activation_mem += self._activation_base_mems.states[chunk_id].total_mem + if self._activation_mem > self._activation_mem_max: + self._activation_mem_max = self._activation_mem # Trigger a memory record - self.point() + self.point(with_options) - def _activation_tarce_hook_backward(self, model: Any, inputs: Any, grad_outputs: Any) -> None: + def _activation_tarce_hook_backward(self, chunk_id: int, model: Any, inputs: Any, grad_outputs: Any) -> None: """ Hook function to trace the activation memory usage for a backward pass. @@ -432,37 +478,28 @@ class SimpleMemoryProfiler: return # Release activation memory usage for each backward pass - self._activation_mem -= self._activation_base_mem.total_mem + self._activation_mem -= self._activation_base_mems.states[chunk_id].total_mem # Trigger a memory record self.point() - def _register_activation_trace_hooks(self) -> None: + def _register_activation_trace_hooks(self, chunk_id: int, model_chunk: torch.nn.Module) -> None: """ Register activation trace hooks for the model and each submodule in the model. 
""" # Register inner activation trace hooks for each submodule in the model - for layer_name in self._activation_config: - # Register a hook for every activation - model = self._model - sub_models = layer_name.split(".") - # Get the target sub-model - for sub_model_name in sub_models: - try: - model = model.get_submodule(sub_model_name) - except AttributeError: - model = None - break - + for layer_name, sub_model in model_chunk.named_modules(): # Register the hook - if model is not None: - model.register_forward_hook(partial(self._inner_activation_trace_hook, layer_name)) + if len(sub_model._modules) != 0: + continue # TODO: in some special cases, we may need some additional configuration to correct + + sub_model.register_forward_hook(partial(self._inner_activation_trace_hook, chunk_id, layer_name)) # Register a forward hook for the main model to track activation memory usage - self._model.register_forward_hook(self._activation_trace_hook_forward) + model_chunk.register_forward_hook(partial(self._activation_trace_hook_forward, chunk_id)) # Register a backward hook for the main model to release activation memory usage - self._model.register_full_backward_hook(self._activation_tarce_hook_backward) + model_chunk.register_full_backward_hook(partial(self._activation_tarce_hook_backward, chunk_id)) def _calc_tensor_memory( self, root_stat: SimpleMemState, named_tensors: Dict[str, torch.Tensor], require_grad: bool = False @@ -554,48 +591,6 @@ class SimpleMemoryProfiler: self._calc_tensor_memory(root_stat, named_tensors) -def build_activation_config(num_layers: int, num_chunks: int = 1) -> List[str]: - # TODO: support interleaved pipeline scheduling. - assert num_chunks == 1, "Only support num_chunks == 1" - - if gpc.is_initialized(ParallelMode.PIPELINE): - pipeline_size = gpc.get_world_size(ParallelMode.PIPELINE) - pipeline_rank = gpc.get_local_rank(ParallelMode.PIPELINE) - else: - pipeline_size = 1 - pipeline_rank = 0 - - all_parts = partition_uniform(num_layers, pipeline_size, num_chunks) - parts = all_parts[pipeline_rank] - start, end = parts[0] - num_blocks = end - start - - block_conf_tmpl = [ - "mixer.rotary_emb", - "mixer.Wqkv", - "mixer.inner_attn", - "mixer.inner_cross_attn", - "mixer.out_proj", - # "dropout1", # skip when dropout_selective_checkpoint is True - # "dropout2", # skip when dropout_selective_checkpoint is True - "norm1", - "norm2", - "mlp.w1", - "mlp.w2", - "mlp.w3", - ] - - block_conf = [] - for block_id in range(num_blocks): - block_conf += [f"blocks.{block_id}.{layer}" for layer in block_conf_tmpl] - - # We don't need to care about whether the embedding, norm, and head layers exist in the model after partitioning. - # If they don't exist, they will be automatically ignored when registering activation trace hooks. 
- activation_conf = ["embedding", "norm", "head"] + block_conf - - return activation_conf - - if __name__ == "__main__": class SimpleModel(torch.nn.Module): @@ -635,32 +630,39 @@ if __name__ == "__main__": return output + def _simple_schedule(_num_chunks, _model_chunks, _input) -> torch.Tensor: + if _num_chunks > 1: + _output = _input + for _model_chunk in _model_chunks: + _output = _model_chunk(_output) + else: + _output = _model_chunks(_input) + + return _output + + # num_chunks config + _num_chunks = 1 + # init model and optimizer - _model: torch.nn.Module = SimpleModel() + if _num_chunks > 1: + _chunks = [SimpleModel(skip_layer2=idx % 2 == 0) for idx in range(_num_chunks)] + _model = torch.nn.ModuleList(_chunks).cuda() + else: + _model: torch.nn.Module = SimpleModel().cuda() _optimizer = torch.optim.Adam(_model.parameters()) - # create activation config for simple model layer by layer. - activation_configs = [ - # model level 0 - "layer1", - "layer2", - "layer3", - # model level 1 - "layer2.layer1", - "layer2.layer3", - ] - - _model.modules() - # init profiler - profiler = SimpleMemoryProfiler(_model, _optimizer, "./test_simple_memory_profiler.log", activation_configs) + profiler = SimpleMemoryProfiler(_model, _optimizer, "./test_simple_memory_profiler", total_steps=1) _optimizer.zero_grad() - x1 = torch.randn((128, 5120)) - x2 = torch.randn((128, 5120)) - out1 = _model(x1) - out2 = _model(x2) + # inputs + x1 = torch.randn((128, 5120)).cuda() + x2 = torch.randn((128, 5120)).cuda() + # forward + out1 = _simple_schedule(_num_chunks, _model, x1) + out2 = _simple_schedule(_num_chunks, _model, x2) + # backward out1.mean().backward() out2.mean().backward() diff --git a/internlm/utils/storage_manager.py b/internlm/utils/storage_manager.py index 8bd7c88..c7b71f4 100644 --- a/internlm/utils/storage_manager.py +++ b/internlm/utils/storage_manager.py @@ -1,21 +1,34 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- +import asyncio +import concurrent.futures import hashlib import io import os +import pickle import re import socket -from enum import Enum -from typing import Any, Dict, List, Union +import stat +from asyncio import InvalidStateError +from asyncio.tasks import ALL_COMPLETED +from datetime import datetime +from typing import Any, Awaitable, Callable, Dict, List, Union -import boto3 -import botocore import torch +import torch.distributed as dist +from internlm.core.context import global_context as gpc from internlm.utils.common import SingletonMeta from internlm.utils.logger import get_logger +try: + import boto3 + import botocore +except ImportError: + pass + + logger = get_logger(__file__) boto3_url_re = re.compile(r"([^\.]+)\.([\d\.]+)") @@ -41,10 +54,6 @@ def llm_save(save_path: str, saved_obj: Any, *args, **kwargs): storage_manager.save(save_path, *args, saved_obj=saved_obj, **kwargs) -class CheckpointType(Enum): - NORMAL_CHECKPOINT = 1 - - class StorageClient: """ StorageClient as a client for s3 storage access. 
@@ -54,7 +63,7 @@ class StorageClient: self.handler = handler @staticmethod - def load(client, load_path: str, map_location): + def load(client, load_path: str, *args, **kwargs): raise NotImplementedError @staticmethod @@ -71,25 +80,51 @@ class StorageClient: class Boto3MetaInfo: - def __init__(self, client: StorageClient, bucket_name: str, endpoint: str, file_path: str) -> None: - self.client = client + """Boto3 meta info for save/load etc.""" + + def __init__( + self, + is_async, + handler: StorageClient, + bucket_name: str, + endpoint: str, + file_path: str, + async_upload_fn: callable, + local_nvme_path=None, + ) -> None: + self.is_async = is_async + self.client = handler self.bucket_name = bucket_name self.endpoint = endpoint self.file_path = file_path + self.async_upload_fn = async_upload_fn + self.local_nvme_path = local_nvme_path + + def __str__(self) -> str: + return f"is_async: {self.is_async}, bucket_name:{self.bucket_name}, endpoint:{self.endpoint}, \ +local_nvme_path: {self.local_nvme_path}" class LocalMetaInfo: - def __init__(self, client: StorageClient, dest_path: str) -> None: - self.client = client + """Local meta info for save/load etc.""" + + def __init__(self, handler: StorageClient, dest_path: str) -> None: + self.is_async = False + self.client = handler self.dest_path = dest_path + self.async_upload_fn = None def unpack_meta(meta): args = [] + is_async = meta.is_async for k, v in meta.__dict__.items(): - if k == "endpoint": + if k in ("endpoint", "async_upload_fn", "is_async"): + continue + if not is_async and k in ("local_nvme_path",): continue args.append(v) + return args @@ -101,21 +136,6 @@ def compute_file_md5_by_chunk(file_name: str): return hash_md5.hexdigest() -def get_boto3_meta(fp: str) -> Boto3MetaInfo: - assert fp.startswith("s3://"), f"Path '{fp}' is not a boto3 url" - parts = fp.lstrip("s3://").split(os.path.sep) - match = boto3_url_re.match(parts[0]) - assert match is not None, f"url '{fp}' is not a valid boto3 url" - bucket_name, endpoint = match.group(1), match.group(2) - endpoint = "http://" + endpoint + ":80" - return Boto3MetaInfo(None, bucket_name, endpoint, os.path.sep.join(parts[1:])) - - -def get_local_meta(fp: str) -> LocalMetaInfo: - assert not fp.startswith("s3://"), f"Path '{fp}' is not a local path" - return LocalMetaInfo(None, fp) - - class Boto3Client(StorageClient): """ Boto3Client @@ -169,7 +189,9 @@ class Boto3Client(StorageClient): ) @staticmethod - def sync_upload_fileobj(handler, bucket_name: str, fp: str, *args, saved_obj=None, **kwargs): + def sync_upload_fileobj( + handler, bucket_name: str, fp: str, local_nvme_path: str, *args, saved_obj=None, **kwargs + ): # pylint: disable=W0613 assert saved_obj is not None, "saved_obj is None!" try: with io.BytesIO() as f: @@ -182,7 +204,14 @@ class Boto3Client(StorageClient): ) from exc @staticmethod - def load(handler, bucket_name: str, fp: str, *args, map_location="cpu", **kwargs) -> Dict: + def load( + handler, + bucket_name: str, + fp: str, + local_nvme_path: str, # pylint: disable=W0613 + *args, + **kwargs, + ) -> Dict: """ Args: fp (str): Path to save, eg. 
s3://opennlplab/model_weights/xxx/ddd.pt @@ -191,7 +220,7 @@ class Boto3Client(StorageClient): with io.BytesIO() as f: handler.client.download_fileobj(bucket_name, fp, f, Config=handler.config) f.seek(0) - states = torch.load(f, *args, map_location=map_location, **kwargs) + states = torch.load(f, *args, **kwargs) except handler.botocore.exceptions.EndpointConnectionError as exc: raise RuntimeError( f"Boto3 Network Error: Please Check your Internet Connection in {socket.gethostname()}" @@ -199,28 +228,40 @@ class Boto3Client(StorageClient): return states @staticmethod - def assert_fp_exists( - handler, - bucket_name: str, - fp: str, - ): + def assert_fp_exists(handler, bucket_name: str, fp: str, local_nvme_path: str): # pylint: disable=W0613 assert len(list(handler.client.list_objects(Bucket=bucket_name, Prefix=fp)["Contents"])) > 0, fp @staticmethod - def get_fns(handler, bucket_name: str, fp: str): + def get_fns(handler, bucket_name: str, fp: str, local_nvme_path: str, *args, **kwargs): # pylint: disable=W0613 """ Ref: https://stackoverflow.com/questions/54314563/ how-to-get-more-than-1000-objects-from-s3-by-using-list-objects-v2 """ paginator = handler.client.get_paginator("list_objects_v2") pages = paginator.paginate(Bucket=bucket_name, Prefix=fp) - folder_name_list = [] for page in pages: - for obj in page["Contents"]: - fp: str = obj["Key"] - folder_name_list.append(fp.rsplit("/", maxsplit=1)[1]) - return folder_name_list + if "Contents" in page: + for obj in page["Contents"]: + pth: str = obj["Key"] + folder_name_list.append(pth.split(fp, maxsplit=1)[1].strip("/").split("/", maxsplit=1)[0]) + return list(set(folder_name_list)) + + @staticmethod + def async_upload_fileobj(handler, bucket_name: str, fp: str, local_nvme_path: str): + try: + with open(local_nvme_path, "rb") as f: + handler.client.upload_fileobj(f, bucket_name, fp, Config=handler.config) + except handler.botocore.exceptions.EndpointConnectionError as exc: + raise RuntimeError( + f"Boto3 Network Error: Please Check your Internet Connection in {socket.gethostname()}" + ) from exc + except Exception as e: + raise e + + @staticmethod + def delete_obj(handler, fp: str): + raise NotImplementedError("boto3 not support delete_obj") class LocalClient(StorageClient): @@ -241,11 +282,11 @@ class LocalClient(StorageClient): torch.save(saved_obj, fp, *args, **kwargs) @staticmethod - def load(handler, fp: str, *args, map_location="cpu", **kwargs): + def load(handler, fp: str, *args, **kwargs): # pylint: disable=W0613 assert isinstance(handler, LocalClient) assert os.path.exists(fp), f"{fp} is not found!" with open(fp, "rb") as f: - states = torch.load(f, map_location=map_location, *args, **kwargs) + states = torch.load(f, *args, **kwargs) return states @staticmethod @@ -267,9 +308,77 @@ class LocalClient(StorageClient): os.remove(fp) +def get_tmp_file_name(tmp_local_folder: str, fp: str): + """ + It should be noted that all our temporary files will be stored in the same folder, + so the file name passed upstream must be unique. 
+    """
+    base_path = os.path.join(tmp_local_folder, fp.split("/")[-1])
+    current_time = datetime.now().strftime("%b%d_%H-%M-%S")
+    pid = os.getpid()
+    # step = self.step_counter
+    return "-".join([base_path, current_time, str(pid)]) + ".tmpfile"  # , str(step)
+
+
+def get_boto3_meta(fp: str, tmp_local_folder: str, is_async: bool) -> Boto3MetaInfo:
+    assert fp.startswith("s3://"), f"Path '{fp}' is not a boto3 url"
+    parts = fp.lstrip("s3://").split(os.path.sep)
+    match = boto3_url_re.match(parts[0])
+    assert match is not None, f"url '{fp}' is not a valid boto3 url"
+    bucket_name, endpoint = match.group(1), match.group(2)
+    endpoint = "http://" + endpoint + ":80"
+    tmp_step_file = get_tmp_file_name(tmp_local_folder, fp)
+    return Boto3MetaInfo(
+        is_async=is_async,
+        handler=None,
+        bucket_name=bucket_name,
+        endpoint=endpoint,
+        file_path=os.path.sep.join(parts[1:]),
+        async_upload_fn=Boto3Client.async_upload_fileobj,
+        local_nvme_path=tmp_step_file,
+    )
+
+
+def get_local_meta(fp: str) -> LocalMetaInfo:
+    assert not fp.startswith("s3://"), f"Path '{fp}' is not a local path"
+    return LocalMetaInfo(None, fp)
+
+
+def get_mount_point_free_size(path: str):
+    """
+    Returns the remaining space of the temporary storage mount point in GB.
+    Args:
+        path (str): temporary storage folder path.
+
+    Raises:
+        FileNotFoundError: If the temporary storage folder does not exist,
+        an error will be reported.
+    """
+    if os.path.exists(path):
+        st = os.statvfs(path)
+        # f_bavail: Number of free blocks for unprivileged users.
+        # f_bsize: Filesystem block size.
+        # return unit is GB.
+        return st.f_bavail * st.f_bsize / (1024**3)
+
+
+def check_tmp_folder_accessibility(tmp_local_folder: str):
+    """
+    Check access permissions for temporary storage.
+    """
+    ret = True
+    if os.path.exists(tmp_local_folder):
+        ret &= os.access(tmp_local_folder, os.W_OK)
+        ret &= os.access(tmp_local_folder, os.R_OK)
+        if ret is False:
+            error_str = f'{socket.gethostname()} does not have read and write permissions on "{tmp_local_folder}"'
+            raise RuntimeError(error_str)
+
+
 class StorageManager(metaclass=SingletonMeta):
     """
     Storage Manager for saving or loading checkpoint.
+    TODO: add a thread to poll the asynchronous storage state.
     """
 
     BACKEND_TYPE = {"boto3", "local"}
@@ -279,8 +388,44 @@
     }
     CLI_DICT = {}
 
-    def __init__(self) -> None:
-        pass
+    def __init__(self, enable_save, tmp_local_folder="/dev/shm/test/", async_mode=True, n_async_workers=8) -> None:
+        self._exception_list = []
+        self._to_be_del_files = []
+        self._async_stack = []
+        self.upload_count = 0
+        self.tmp_local_folder = tmp_local_folder
+        self.async_mode = async_mode
+        self.has_warning = False
+        self._async_loop = None
+        self._thread_pool = None
+        self.latest_save_folder = None
+        self.latest_save_step = 0
+        self.async_task_peeding = False
+
+        if enable_save and self.async_mode:
+            self._async_loop = asyncio.new_event_loop()
+            self._thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=n_async_workers)
+
+            check_tmp_folder_accessibility(os.path.dirname(self.tmp_local_folder))
+
+            # Try to create tmp folder
+            try:
+                os.makedirs(self.tmp_local_folder, exist_ok=True)
+                os.chmod(self.tmp_local_folder, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+            except FileExistsError:
+                pass
+
+            # In case it is a directory created by other users, we check the permissions again.
+            check_tmp_folder_accessibility(self.tmp_local_folder)
+
+            # Try to clean tmp folder's empty folder.
+            self.try_delete_tmpfile(self.tmp_local_folder)
+
+            # Available storage space check.
+            free_size = get_mount_point_free_size(self.tmp_local_folder)
+            if free_size < 0.1:
+                logger.error(f'tmp_local_folder only has "{free_size}" GB free space, less than 100 GB!')
+                raise RuntimeError(f"Insufficient temporary storage space on {socket.gethostname()}")
 
     def _get_client(self, path=str) -> Union[Boto3MetaInfo, LocalMetaInfo]:
         """
@@ -301,7 +446,7 @@ class StorageManager(metaclass=SingletonMeta):
             meta_info = get_local_meta(path)
             backend_key = backend
         elif backend == "boto3":
-            meta_info = get_boto3_meta(path)
+            meta_info = get_boto3_meta(path, self.tmp_local_folder, self.async_mode)
             backend_key = backend + ":" + meta_info.endpoint
             init_args = (meta_info.endpoint,)
             if (
@@ -310,10 +455,12 @@
                 or "HTTP_PROXY" in os.environ
                 or "HTTPS_PROXY" in os.environ
             ):
-                raise RuntimeWarning(
-                    "HTTP/HTTPS proxy is detected when using boto3, incorrectly setting \
-the proxy may make boto3 unavailable or affect performance."
-                )
+                if not self.has_warning:
+                    logger.warning(
+                        "HTTP/HTTPS proxy is detected when using boto3, incorrectly setting \
+                        the proxy may make boto3 unavailable or affect performance."
+                    )
+                    self.has_warning = True
 
         assert backend in StorageManager.BACKEND_TYPE, f"Unknown backend: {backend}"
 
@@ -333,19 +480,145 @@ the proxy may make boto3 unavailable or affect performance."
         meta = self._get_client(path=folder)
         return meta.client.get_fns(*unpack_meta(meta))
 
-    def save(self, save_path: str, saved_obj: Any, *args, **kwargs):
+    def save(self, save_path: str, saved_obj: Any, *args, async_upload=None, **kwargs):
         meta = self._get_client(path=save_path)
-        meta.client.sync_upload_fileobj(*unpack_meta(meta), *args, saved_obj=saved_obj, **kwargs)
-
-    def load(self, load_path: str, *args, map_location="cpu", **kwargs) -> Any:
+        if async_upload is None:
+            async_upload = self.async_mode
+        if async_upload:
+            assert (
+                self.tmp_local_folder
+            ), "StorageManager has no tmp_local_folder set, so async save cannot be performed."
+            tmp_step_file = meta.local_nvme_path
+            self._to_be_del_files.append(tmp_step_file)
+            with open(tmp_step_file, "wb") as f:
+                torch.save(saved_obj, f, pickle_protocol=pickle.HIGHEST_PROTOCOL)
+            self.async_executor(meta.async_upload_fn, *unpack_meta(meta))
+            os.chmod(tmp_step_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
+            self.async_task_peeding = True
+        else:
+            meta.client.sync_upload_fileobj(*unpack_meta(meta), *args, saved_obj=saved_obj, **kwargs)
+            self.upload_count += 1
+
+    def load(self, load_path: str, *args, **kwargs) -> Any:
+        self.wait()
         meta = self._get_client(path=load_path)
-        return meta.client.load(*unpack_meta(meta), map_location=map_location, *args, **kwargs)
+        return meta.client.load(*unpack_meta(meta), *args, **kwargs)
 
     def delete_obj(self, fp: str):
         meta = self._get_client(path=fp)
         meta.client.delete_obj(*unpack_meta(meta))
 
+    def _del_tmp_folder(self):
+        for fp in self._to_be_del_files:
+            try:
+                os.remove(fp)
+            except FileNotFoundError:
+                pass
+            except SystemError as e:
+                logger.error(f'delete file: {fp}, failed for reason:"{e}"')
+            else:
+                pass
 
-storage_manager = StorageManager()
 
+    def try_delete_tmpfile(self, tmp_dir: str):
+        """Delete temporary files in tmp_dir."""
+
+        for filename in os.listdir(tmp_dir):
+            if filename.endswith(".tmpfile"):
+                file_path = os.path.join(tmp_dir, filename)
+                try:
+                    os.remove(file_path)
+                    logger.info(f"Delete tmpfile: {file_path}")
+                except OSError:
+                    # Ignore deletion errors
+                    pass
+
+    async def _sync_tasks(self) -> Awaitable[None]:
+        if self._async_stack:
+            await asyncio.wait(self._async_stack, return_when=ALL_COMPLETED)
+            count = 0
+            while self._async_stack:
+                t = self._async_stack[0]
+                try:
+                    e = t.exception()
+                    if e:
+                        self._exception_list.append((e, count))
+                        logger.error(f"File:{self._to_be_del_files[count]}, upload failed for {e}")
+                    # raise e
+                    count += 1
+                    self._async_stack.pop(0)
+                except InvalidStateError:
+                    # Not finished. https://docs.python.org/3/library/asyncio-task.html#asyncio.Task.exception
+                    pass
+
+    def async_executor(self, fn: Callable, *args, **kwargs) -> None:
+        """
+        Overview:
+            Execute a task in the background, then append the future instance to _async_stack.
+        Arguments:
+            - fn (:obj:`Callable`): Synchronization function.
+ """ + if not self._async_loop: + raise RuntimeError("Event loop was not initialized, please call this function in async or parallel mode") + t = self._async_loop.run_in_executor(self._thread_pool, fn, *args, **kwargs) + self._async_stack.append(t) + + def wait(self) -> bool: + """Wait for async operations to complete.""" + + if not self.async_mode: + return + + if not self.async_task_peeding: + return + + if self._async_loop: + self._async_loop.run_until_complete(self._sync_tasks()) + + if self._exception_list: + for error_msg, file_id in self._exception_list: + logger.error( + f"Node:{socket.gethostname()}, Error: Checkpoint {self._to_be_del_files[file_id]} " + f"failed on step {self.upload_count}: {error_msg}" + ) + + # TODO: Re-upload in sync mode + raise RuntimeError( + f"Failed to upload {self._to_be_del_files[file_id]} " f"on step {self.upload_count}: {error_msg}" + ) + + self._del_tmp_folder() + self._exception_list.clear() + self._to_be_del_files.clear() + self.async_task_peeding = False + + if gpc.is_rank_for_log(): + self.upload_count += 1 + if self.async_mode: + self.save( + os.path.join(self.latest_save_folder, f"{self.latest_save_step}.step"), + saved_obj=dict({"step": self.latest_save_step}), + async_upload=False, + ) + + +storage_manager: StorageManager = None + + +def init_storage_manager(ckpt_config): + global storage_manager + storage_manager = StorageManager( + ckpt_config.enable_save_ckpt, + tmp_local_folder=ckpt_config.async_upload_tmp_folder, + async_mode=ckpt_config.async_upload, + ) + + +def get_storage_manager(): + assert storage_manager is not None, "storage_manager has not been init!" + return storage_manager + + +def wait_async_upload_finish(): + dist.barrier() + storage_manager.wait() diff --git a/internlm/utils/writer.py b/internlm/utils/writer.py new file mode 100644 index 0000000..0997817 --- /dev/null +++ b/internlm/utils/writer.py @@ -0,0 +1,142 @@ +import logging +import os +import socket +import sys +import traceback +from functools import partial + +import torch +from torch.utils.tensorboard import SummaryWriter + +from internlm.core.context import global_context as gpc + + +def tb_save_run_info(writer, config_lines, global_step=0): + writer.add_text(tag="cmd", text_string=" ".join(sys.argv[:]), global_step=global_step) + lines = [] + for line in config_lines: + if line.strip().startswith("#"): + continue + lines.append(line) + writer.add_text(tag="config", text_string="\n".join(lines), global_step=global_step) + + +def init_tb_writer( + job_name: str, + launch_time: str, + file_name: str, + tensorboard_folder: str, + resume_tb_folder: str, + step_count: int, + config: str, + logger: logging.Logger, +): + tb_log_file_name = file_name + if not tensorboard_folder: + tb_folder = os.path.join(job_name, launch_time, "tensorboards") + else: + tb_folder = tensorboard_folder + + if gpc.get_global_rank() == 0: + # If we don't load ckpt, 'resume_tb_folder' is set as the tensorboard + # dir of the last task by 'make_launch_script.sh'. + # If we load ckpt, 'resume_tb_folder' will be overwritten as the + # reloaded 'train_state.resume_tb_folder'.s + if resume_tb_folder is not None: + assert len(resume_tb_folder) > 0 and resume_tb_folder != "/" + if not os.path.exists(resume_tb_folder): + logger.error( + f"Can't found resume_tb_folder{resume_tb_folder}, \ +please make sure this folder is located at local file system." + ) + else: + logger.info(f"Try mv tensorboard logs: {resume_tb_folder} to {tb_folder}... 
") + os.system(f"cp -r {resume_tb_folder}/* {tb_folder}/") + os.system(f"chmod -R +w {tb_folder}/") + else: + logger.info(f"Login tensorboard logs to: {tb_folder}") + + tb_logdir = os.path.join(tb_folder, tb_log_file_name) + writer = SummaryWriter(log_dir=tb_logdir, max_queue=5, purge_step=step_count, flush_secs=3) + writer.add_text(tag="job_name", text_string=job_name, global_step=step_count) + writer.add_text(tag="tensorboard_folder", text_string=tb_logdir, global_step=step_count) + + torch.distributed.broadcast_object_list([tb_folder], src=0) + else: + objects = [None] + torch.distributed.broadcast_object_list(objects, src=0) + tb_folder = objects[0] + tb_logdir = os.path.join(tb_folder, tb_log_file_name) + writer = SummaryWriter(log_dir=tb_logdir, max_queue=5, purge_step=step_count, flush_secs=3) + + if gpc.is_rank_for_log(): + tb_save_run_info( + writer=writer, + config_lines=config, + global_step=step_count, + ) + + writer.add_text( + tag=f"mapping_{tb_log_file_name}", + text_string=f"file_path={tb_logdir} hostname={socket.gethostname()} device={torch.cuda.current_device()}", + global_step=step_count, + ) + writer.add_scaler = partial(writer.add_scalar, new_style=True) + + return writer, tb_logdir + + +class Writer: + """ + Customed writer based on tensorboard for recording training metrics. + + Args: + job_name (str): The name of training job, defaults to None. + launch_time (str): A string representing the launch time of the training. + file_name (str): The log file name, defaults to None. + tensorboard_folder (str): A string representing the folder for saving tensorboard logs. + resume_tb_folder (str): A string representing the folder for resuming tensorboard logs. + step_count (int): An integer representing the step count of the training. + config (str): A string representing the configuration of the training. + logger (logging.Logger): A logging.Logger object for logging information during training. + enable_tb (bool): A boolean indicating whether to enable the tensorboard writer. 
+ + """ + + def __init__( + self, + job_name: str = None, + launch_time: str = None, + file_name: str = None, + tensorboard_folder: str = None, + resume_tb_folder: str = None, + step_count: int = 0, + config: str = None, + logger: logging.Logger = None, + enable_tb: bool = True, + ) -> None: + self.enable_tb = enable_tb + self.tb_writer, self.tb_logdir = init_tb_writer( + job_name=job_name, + launch_time=launch_time, + file_name=file_name, + tensorboard_folder=tensorboard_folder, + resume_tb_folder=resume_tb_folder, + step_count=step_count, + config=config, + logger=logger, + ) + + def add_scalar(self, key, value, step): + try: + if self.enable_tb and self.tb_writer is not None: + self.tb_writer.add_scalar(tag=key, scalar_value=value, global_step=step) + except Exception: + traceback.print_exc() + + def add_text(self, key, value, step): + try: + if self.enable_tb and self.tb_writer is not None: + self.tb_writer.add_text(tag=key, text_string=value, global_step=step) + except Exception: + traceback.print_exc() diff --git a/tools/README.md b/tools/README.md index ae7d196..0c78a56 100644 --- a/tools/README.md +++ b/tools/README.md @@ -30,7 +30,6 @@ $ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_ ``` 可以通过运行以下命令来生成`bin`和`meta`文件: - ```bash $ python tools/tokenizer.py --text_input_path raw_data.txt --bin_output_path cn/output.bin ``` diff --git a/tools/README_EN.md b/tools/README_EN.md index 1385acd..3105146 100644 --- a/tools/README_EN.md +++ b/tools/README_EN.md @@ -14,7 +14,6 @@ This directory provide some tools for model training with the following file str We need to use a `tokenizer` to generate `bin` and `meta` files for raw data. We import the tokenizer model by specifying the model weight path in `tools/tokenizer.py`. Currently, we provide `V7.model` to generate tokens. If you want to use a different model, you can modify the model weight path in `tokenizer.py` directly. We can run the following command to generate `bin` and `meta` files corresponding to the original data. The parameter `text_input_path` represents the path of the original text data, currently supporting `txt`, `json`, and `jsonl` formats, while `bin_output_path` represents the save path of the generated `bin` files. 
- ```bash $ python tools/tokenizer.py --text_input_path your_input_text_path --bin_output_path your_output_bin_path ``` diff --git a/train.py b/train.py index d272ada..de7cc7c 100644 --- a/train.py +++ b/train.py @@ -5,342 +5,76 @@ import socket import time import traceback from functools import partial -from typing import Iterable +import numpy as np import torch import torch.distributed as dist -from torch import nn -from torch.utils.data import DataLoader import internlm from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc -from internlm.core.naive_amp import NaiveAMPModel +from internlm.core.scheduler import SchedulerMetricHook from internlm.core.trainer import TrainState -from internlm.data.batch_sampler import StaticBatchSampler -from internlm.data.collaters import packed_collate_fn -from internlm.data.dummy_dataset import RandomDataset -from internlm.data.packed_dataset import ( - PackedDataset, - PackedDatasetWithoutCuSeqlen, - get_packed_dataset_without_short_length, -) -from internlm.data.utils import DATASET_TYPE_IDS_MAP +from internlm.initialize import initialize_distributed_env from internlm.model.loss import FlashGPTLMLoss -from internlm.solver.beta2_scheduler import Beta2Scheduler -from internlm.solver.lr_scheduler import FineTuneCosineAnnealingWarmupLR -from internlm.solver.optimizer.hybrid_zero_optim import HybridZeroOptimizer +from internlm.model.metrics import AccPerplex +from internlm.monitor import initialize_monitor_manager, send_alert_message +from internlm.monitor.monitor import monitor_manager as mm +from internlm.train import ( + get_train_data_loader, + get_validation_data_loader, + initialize_llm_profile, + initialize_model, + initialize_optimizer, + load_new_batch, + record_current_batch_training_metrics, +) from internlm.utils.common import ( BatchSkipper, - get_master_node, get_megatron_flops, - get_process_rank, launch_time, parse_args, ) -from internlm.utils.logger import get_logger +from internlm.utils.evaluation import evaluate_on_val_dls +from internlm.utils.logger import get_logger, initialize_uniscale_logger from internlm.utils.megatron_timers import megatron_timer as timer -from internlm.utils.model_checkpoint import ( - load_context, - load_model_checkpoint, - load_optimizer_checkpoint, - load_sampler, - load_scheduler, - save_checkpoint, -) -from internlm.utils.parallel import ( - is_no_pp_or_last_stage, - sync_model_param, - sync_model_param_within_tp, -) -from internlm.utils.registry import MODEL_INITIALIZER -from internlm.utils.simple_memory_profiler import ( - SimpleMemoryProfiler, - build_activation_config, -) +from internlm.utils.model_checkpoint import CheckpointManager +from internlm.utils.parallel import get_parallel_log_file_name +from internlm.utils.simple_memory_profiler import SimpleMemoryProfiler +from internlm.utils.writer import Writer # global llm logger logger = get_logger(__file__) -def initialize_distributed_env(config: str, launcher: str = "slurm", master_port: int = 8888, seed: int = 1024): +def initialize_llm_logger(start_time: str): """ - Initialize distributed environment for distributed training. + Initialize customed uniscale logger. Args: - config (str): Config file path. - launcher (str): Launcher for launching distributed environment, can be slurm or torch. "slurm" by default. - master_port (str): The master port for distributed training. 8888 by default. - seed (int, optional): Specified random seed for every process. 1024 by default. 
+ start_time (str): The launch time of current training job. + + Returns: The instance of uniscale logger. """ - torch.cuda.empty_cache() - - if launcher == "torch": - internlm.launch_from_torch(config=config, seed=seed) - elif launcher == "slurm": - internlm.launch_from_slurm( - config=config, - host=get_master_node(), - port=master_port, - seed=seed, - ) - else: - assert launcher in ["slurm", "torch"], "launcher only support slurm or torch" - - -def initialize_model(): - """ - Initialize model. - - Returns: The neural network model to be trained or evaluated. - """ - - assert ( - not hasattr(gpc.config.parallel, "pipeline") or gpc.config.parallel.pipeline == 1 - ), "Pipeline parallelism is not supported for now." - - model = MODEL_INITIALIZER.get_module(module_name=gpc.config.model_type)(**(gpc.config.model)) - model = NaiveAMPModel( - model=model, - output_to_fp32=is_no_pp_or_last_stage(), - dtype=gpc.config.model.get("dtype", torch.half), - sync_buffer=False, + uniscale_logger = initialize_uniscale_logger( + job_name=gpc.config.JOB_NAME, launch_time=start_time, file_name=get_parallel_log_file_name() ) + if uniscale_logger is not None: + global logger + logger = uniscale_logger - # This sync is very important, cause the model weights kept in optimizer are copied - # from the origin parameters in the memory, so we should make sure the dp sync - # does not influence the model weights in optimizer be different with the origin parameters. - sync_model_param(model, parallel_mode=ParallelMode.DATA) - - # This function is needed to make sure parameters that are not splitted by tensor parallelism are - # the same across tensor parallelism. - sync_model_param_within_tp(model) - - return model - - -def get_train_data_loader(num_worker: int = 0): - """ - Generate and return the training data loader. - - Returns: A tuple of (train_dl, dataset_types). 
- """ - - # Get the dataset types - dataset_types = None - dataset_types = list(DATASET_TYPE_IDS_MAP.keys()) - data_cfg = gpc.config.data - - # Get the sample weight dictionary - train_folder = data_cfg.train_folder - - if not train_folder: - train_ds = RandomDataset(num_samples=1000000, max_len=data_cfg.seq_len) - if data_cfg.pack_sample_into_one: - train_ds = PackedDatasetWithoutCuSeqlen( - train_ds, max_length_per_sample=data_cfg.seq_len, packed_length=data_cfg.packed_length - ) - else: - train_ds = PackedDataset( - train_ds, max_length_per_sample=data_cfg.seq_len, packed_length=data_cfg.packed_length - ) - else: - train_ds = get_packed_dataset_without_short_length( - folder=data_cfg.train_folder, - packed_length=data_cfg.packed_length, - max_length_per_sample=data_cfg.seq_len, - show_progress=dist.get_rank() == 0, - min_length=data_cfg.min_length, - min_length_dict=data_cfg.get("min_length_dict", {}), - pack_into_one_sample=data_cfg.pack_sample_into_one, - ) - - # partition already completed - # assert isinstance(train_ds, (PackedDataset, PackedDatasetWithoutCuSeqlen)) - if isinstance(train_ds, (PackedDataset, PackedDatasetWithoutCuSeqlen)): - datasets = [train_ds] - else: - datasets = train_ds.datasets - - # Create the training dataset sampler - train_sampler = StaticBatchSampler( - datasets, - batch_size=data_cfg.micro_num, - rampup_batch_size=data_cfg.rampup_batch_size, - micro_bsz=data_cfg.micro_bsz, - seed=1024, - drop_last=True, - data_rank=gpc.get_local_rank(ParallelMode.DATA), - data_world_size=gpc.get_world_size(ParallelMode.DATA), - ) - - train_collate_fn = partial(packed_collate_fn, packed_length=data_cfg.packed_length) - - # Create the training data loader - train_dl = DataLoader( - dataset=train_ds, - batch_sampler=train_sampler, - num_workers=num_worker, - pin_memory=True, - collate_fn=train_collate_fn, - persistent_workers=True, - ) - - return train_dl, dataset_types - - -def load_new_batch(train_dl: DataLoader, train_iter: Iterable, train_state: TrainState): - """ - Load and return the new batch data based on training data loader. - - Args: - train_dl (torch.utils.data.DataLoader): Dataloader for training. - train_iter (Iterable): Data iterator from which get a batch of data, obtained by calling iter(dataloader). - train_state (TrainState): Current training state. - - Returns: A batch data and the updated train_iter. - """ - - timer("batch-gen").start() - try: - batch = next(train_iter) # structure is ({'input_ids': Tensor, 'cu_seqlens': Tensor}, Tensor) - next(train_state.batch_sampler_iter) - except StopIteration: - train_iter = iter(train_dl) - batch = next(train_iter) - train_state.batch_sampler_iter = iter(train_state.batch_sampler) - next(train_state.batch_sampler_iter) - train_state.num_consumed_samples_in_epoch = 0 - timer("batch-gen").stop() - - batch[0].pop("type_ids", None) - - return batch, train_iter - - -def initialize_optimizer(model: nn.Module): - """ - Initialize optimizer. - - Args: - model (torch.nn.Module): Your model instance to be trained or evaluated. - - Returns: A tuple of (optimizer, beta2_scheduler, lr_scheduler). 
- """ - adam_cfg = gpc.config.adam - naive_optimizer = torch.optim.AdamW( - params=[{"params": model.parameters(), "weight_decay": adam_cfg.weight_decay}], - lr=adam_cfg.lr, - betas=(adam_cfg.adam_beta1, adam_cfg.adam_beta2), - eps=adam_cfg.adam_eps, - ) - - optimizer = HybridZeroOptimizer( - naive_optimizer, grad_scal_cfg=gpc.config.grad_scaler, zero_cfg=gpc.config.hybrid_zero_optimizer - ) - - beta2_scheduler = Beta2Scheduler(optimizer=naive_optimizer, **gpc.config.beta2_scheduler) - - lr_scheduler = FineTuneCosineAnnealingWarmupLR(optimizer, **gpc.config.lr_scheduler) - - return optimizer, beta2_scheduler, lr_scheduler - - -def record_current_batch_training_metrics( - get_tflops_func, - logger, - success_update, - batch_count, - batch, - train_state, - optimizer, - beta2_scheduler, - trainer, - start_time, - loss, - grad_norm, -): - """ - Print some training metrics of current batch. - """ - - if success_update in (0, True): - train_state.num_consumed_tokens += batch[1].nelement() * gpc.get_world_size(ParallelMode.DATA) - - if success_update and gpc.is_rank_for_log(): - lr = optimizer.param_groups[0]["lr"] - if hasattr(trainer.engine.optimizer, "grad_scaler"): - scaler = trainer.engine.optimizer.grad_scaler._scale.item() - elif hasattr(trainer.engine.optimizer.optim, "grad_scaler"): - scaler = trainer.engine.optimizer.optim.grad_scaler._scale.item() - - num_tokens_in_batch = batch[1].nelement() - num_samples_in_batch = sum([len(b) - 1 for b in batch[0]["cu_seqlens"]]) - max_length_in_batch = max([(b[1:] - b[:-1]).max().item() for b in batch[0]["cu_seqlens"]]) - max_samples_in_batch = max([len(b) - 1 for b in batch[0]["cu_seqlens"]]) - min_samples_in_batch = min([len(b) - 1 for b in batch[0]["cu_seqlens"]]) - - tk_per_gpu = 0 - tk_per_gpu = round( - num_tokens_in_batch - * gpc.get_world_size(ParallelMode.DATA) - / gpc.get_world_size(ParallelMode.GLOBAL) - / (time.time() - start_time), - 2, - ) - - tflops = get_tflops_func((time.time() - start_time)) - - infos = { - "tflops": tflops, - "step": batch_count, - "loss": loss.item(), - "tgs (tokens/gpu/second)": tk_per_gpu, - "lr": lr, - "loss_scale": scaler, - "grad_norm": grad_norm, - } - - infos["micro_num"] = len(batch[1]) - infos["num_consumed_tokens"] = train_state.num_consumed_tokens - infos["inf_nan_skip_batches"] = train_state.inf_nan_skip_batches - infos["num_samples_in_batch"] = num_samples_in_batch # the number of batches which have the most samples - infos["largest_length"] = max_length_in_batch # the longest input - infos["largest_batch"] = max_samples_in_batch # the batch with the most samples - infos["smallest_batch"] = min_samples_in_batch - infos["adam_beta2"] = beta2_scheduler.get_beta2() - - line = "" - for k, v in infos.items(): - line += f"{k}={v}," - - fwd_bwd_time = round(timer("fwd-bwd").elapsed(), 2) - line += f"fwd_bwd_time={fwd_bwd_time}" - - logger.info(line) + return uniscale_logger def main(args): - # initialize distributed environment - initialize_distributed_env(config=args.config, launcher=args.launcher, master_port=args.port, seed=args.seed) - assert hasattr(gpc, "config") and gpc.config is not None - # init setting skip_batches = gpc.config.data.skip_batches total_steps = gpc.config.data.total_steps - load_optimizer = gpc.config.ckpt.load_optimizer + valid_every = gpc.config.data.valid_every label_smoothing = gpc.config.loss.label_smoothing lr = gpc.config.adam.lr - # ckpt setting - save_ckpt_folder = gpc.config.ckpt.save_ckpt_folder - enable_save_ckpt = gpc.config.ckpt.enable_ckpt - checkpoint_every = 
gpc.config.ckpt.checkpoint_every - - load_model_only_folder = gpc.config.ckpt.get("load_model_only_folder", None) - load_resume_ckpt_folder = gpc.config.ckpt.get("load_ckpt_folder", None) - get_tflops_func = partial( get_megatron_flops, checkpoint=gpc.config.model.checkpoint, @@ -359,25 +93,8 @@ def main(args): dist.broadcast_object_list(objs, src=0) current_time = objs[0] - model_load_path = None - if load_resume_ckpt_folder is not None: - logger.info( - f"===========Resume training from `{load_resume_ckpt_folder}` {current_time} on host:" - f"{socket.gethostname()}===========" - ) - model_load_path = load_resume_ckpt_folder - elif load_model_only_folder is not None: - logger.info( - f"===========SFT training from `{load_model_only_folder}` {current_time} on host:" - f"{socket.gethostname()}===========" - ) - model_load_path = load_model_only_folder - else: - logger.info( - f"===========New Run {current_time} on host:{socket.gethostname()}," - f"tp:{gpc.get_local_rank(ParallelMode.TENSOR)},pp={gpc.get_local_rank(ParallelMode.PIPELINE)}," - f"dp={gpc.get_local_rank(ParallelMode.DATA)}===========" - ) + # initialize customed llm logger + uniscale_logger = initialize_llm_logger(start_time=current_time) # initialize and resume train state train_state = TrainState(gpc.config) @@ -385,32 +102,66 @@ def main(args): # initialize model model = initialize_model() + with open(args.config, "r") as f: + config_lines = f.readlines() + ckpt_manager = CheckpointManager( + ckpt_config=gpc.config.ckpt, + model=model, + model_config=gpc.config.model, + model_config_file="".join(config_lines), + feishu_address=gpc.config.alert_address, + ) + # initialize loss function criterion = FlashGPTLMLoss(parallel_output=True, label_smoothing=label_smoothing) - # initialize the train data loader - train_dl, _ = get_train_data_loader(num_worker=4) + # initialize the train and validation data loader + train_dl, dataset_types = get_train_data_loader(num_worker=4) + val_dls = get_validation_data_loader() train_state.init_batch_sampler(train_dl) # Loading model weights must be done before zero is initialized. - if model_load_path is not None: - load_model_checkpoint(folder=model_load_path, model=model) + ckpt_manager.try_load_model(current_time) optimizer, beta2_scheduler, lr_scheduler = initialize_optimizer(model=model) # Loading other persistent training states. - if load_resume_ckpt_folder is not None: - # load lr scheduler states. - load_scheduler(load_resume_ckpt_folder, lr_scheduler, optimizer, lr, train_state) - # load training states. - load_context(load_resume_ckpt_folder, train_dl, train_state) - # load dataloader sampler states. - load_sampler(load_resume_ckpt_folder, train_dl.batch_sampler) - # load optimzier states. - if load_optimizer: - load_optimizer_checkpoint(load_resume_ckpt_folder, optimizer) + ckpt_manager.try_resume_training(lr_scheduler, optimizer, lr, train_state, train_dl) + + # initialize customed llm writer + writer = Writer( + job_name=gpc.config.JOB_NAME, + launch_time=current_time, + file_name=get_parallel_log_file_name(), + tensorboard_folder=gpc.config.tensorboard_folder, + resume_tb_folder=train_state.resume_tb_folder, # resume from ckpt. + step_count=train_state.step_count, # resume from ckpt. 
+ config=config_lines, + logger=logger, + enable_tb=gpc.config.enable_tb, + ) + + # initialize metric for calculating accuracy and perplexity + metric = AccPerplex( + device=torch.cuda.current_device(), + tp_pg=gpc.get_group(ParallelMode.TENSOR), + dp_pg=gpc.get_group(ParallelMode.DATA), + dataset_types=dataset_types, + ) # initialize trainer + scheduler_hooks = [ + SchedulerMetricHook( + metric=metric, + skip=( + gpc.is_using_pp() + and hasattr(gpc.config.model, "num_chunks") + and gpc.config.model.num_chunks > 1 + and gpc.config.parallel["pipeline"].get("interleaved_overlap", False) + ), + ), + ] + trainer, train_dl, _, _ = internlm.initialize_trainer( model=model, optimizer=optimizer, @@ -418,17 +169,17 @@ def main(args): train_dataloader=train_dl, lr_scheduler=lr_scheduler, beta2_scheduler=beta2_scheduler, + scheduler_hooks=scheduler_hooks, ) # initialize simple memory profiler if args.profiling: memory_profiler = SimpleMemoryProfiler( - model.model, + model, optimizer.optim, log_folder=f"memory_trace/rank{gpc.get_global_rank()}_" + f"dp{gpc.get_local_rank(ParallelMode.DATA)}_" + f"tp{gpc.get_local_rank(ParallelMode.TENSOR)}", - activation_config=build_activation_config(gpc.config.model.num_layers), ) else: memory_profiler = None @@ -441,89 +192,118 @@ def main(args): # transfer the train data loader into train data iterator train_iter = iter(train_dl) - # start iterating the train data and begin training - for batch_count in range(train_state.batch_count, total_steps): - if batch_count % 50 == 0: - torch.cuda.empty_cache() + with initialize_llm_profile(profiling=args.profiling, start_time=current_time) as prof: + # start iterating the train data and begin training + for batch_count in range(train_state.batch_count, total_steps): + if batch_count % 50 == 0: + torch.cuda.empty_cache() - start_time = time.time() - timer("one-batch").start() + start_time = time.time() + timer("one-batch").start() - # load batch data - batch, train_iter = load_new_batch(train_dl=train_dl, train_iter=train_iter, train_state=train_state) + # load batch data + batch, train_iter = load_new_batch(train_dl=train_dl, train_iter=train_iter, train_state=train_state) - # record the consumed samples in training - train_state.batch_count = batch_count - train_state.num_consumed_samples_in_epoch += len(batch[1]) - if batch_skipper(batch_count): # skip this batch - if gpc.is_rank_for_log(): - logger.info(f"Skip batch count:`{batch_count}`...") - timer("one-batch").stop() - continue + # record the consumed samples in training + train_state.batch_count = batch_count + train_state.num_consumed_samples_in_epoch += len(batch[1]) + if batch_skipper(batch_count): # skip this batch + if gpc.is_rank_for_log(): + logger.info(f"Skip batch count:`{batch_count}`...") + timer("one-batch").stop() + continue - # zero the grads of parameters - trainer.zero_grad() + # zero the grads of parameters + trainer.zero_grad() + # process data + if batch[0].get("type_ids", None) is not None: + metric.set_current_type_ids(type_ids=batch[0].pop("type_ids", None)) - # do forward and backward - timer("fwd-bwd").start() - _, _, loss = trainer.execute_schedule(batch, forward_only=False, return_loss=True, return_output_label=False) - timer("fwd-bwd").stop() - assert loss is not None + # do forward and backward + timer("fwd-bwd").start() - # update parameters, and returns (success_update, grad_norm) - trainer_result = trainer.step() - assert trainer_result is not None + _, _, loss = trainer.execute_schedule( + batch, forward_only=False, return_loss=True, 
return_output_label=False + ) + timer("fwd-bwd").stop() - success_update, grad_norm = trainer_result - if success_update: # update parameters successfully - train_state.step_count += 1 - else: - train_state.inf_nan_skip_batches += 1 # record the amount of updating parameters unsuccessfully. - if grad_norm == -99.0 and gpc.is_rank_for_log(): # -99.0 encodes a specific failure case - logger.warning(f"Warning: skip parameter update at step {batch_count}.") + # update parameters, and returns (success_update, grad_norm) + trainer_result = trainer.step() + assert trainer_result is not None - # calculate and record the training metrics, eg. loss, accuracy and so on. - record_current_batch_training_metrics( - get_tflops_func=get_tflops_func, - logger=logger, - success_update=success_update, - batch_count=batch_count, - batch=batch, - train_state=train_state, - optimizer=optimizer, - beta2_scheduler=beta2_scheduler, - trainer=trainer, - start_time=start_time, - loss=loss, - grad_norm=grad_norm, - ) + success_update, grad_norm_groups = trainer_result + if success_update: # update parameters successfully + train_state.step_count += 1 + else: + train_state.inf_nan_skip_batches += 1 # record the amount of updating parameters unsuccessfully. + if -1 in grad_norm_groups and gpc.is_rank_for_log(): # -1 encodes a specific failure case + logger.warning(f"Warning: skip parameter update at step {batch_count}.") + send_alert_message( + address=gpc.config.alert_address, + message=f"Warning: skip parameter update at step {batch_count}.", + ) - timer("one-batch").stop() - - if memory_profiler is not None: - memory_profiler.step() - - # checkpoint the training states in specific steps, which is determined by the args "checkpoint_every" - # # save batch sampler that tracks the true consumed samples - if enable_save_ckpt and train_state.step_count % checkpoint_every == 0: - save_checkpoint( - folder=save_ckpt_folder, - model=model, - optimizer=optimizer, - scheduler=lr_scheduler, + # calculate and record the training metrics, eg. loss, accuracy and so on. 
+ record_current_batch_training_metrics( + get_tflops_func=get_tflops_func, + logger=logger, + writer=writer, + success_update=success_update, + batch_count=batch_count, + batch=batch, train_state=train_state, - model_config=gpc.config.model, + optimizer=optimizer, + beta2_scheduler=beta2_scheduler, + trainer=trainer, + start_time=start_time, + loss=loss, + grad_norm=np.array(grad_norm_groups), + metric=metric, + update_panel=uniscale_logger is not None, ) - # wait for all checkpoint uploads to be completed - dist.barrier() + timer("one-batch").stop() + + # evaluate on validation data loaders + if valid_every > 0 and train_state.step_count % valid_every == 0: + evaluate_on_val_dls( + trainer=trainer, + val_dls=val_dls, + writer=writer, + logger=logger, + step_count=train_state.step_count, + update_panel=uniscale_logger is not None, + ) + + # checkpoint the training states in specific steps, which is determined by the args "checkpoint_every" + # # save batch sampler that tracks the true consumed samples + now_break = ckpt_manager.try_save_checkpoint(train_state) + if now_break: + break + + if memory_profiler is not None: + memory_profiler.step() + + if batch_count % 2 == 0: + prof.step() + + ckpt_manager.wait_async_upload_finish() if __name__ == "__main__": args = parse_args() + hostname = socket.gethostname() - try: - main(args) - except Exception: - print(f"Raise exception from {socket.gethostname()} with proc id: {get_process_rank()}") - traceback.print_exc() + # initialize distributed environment + initialize_distributed_env(config=args.config, launcher=args.launcher, master_port=args.port, seed=args.seed) + assert hasattr(gpc, "config") and gpc.config is not None + + # initialize monitor manager context + with initialize_monitor_manager(job_name=gpc.config.JOB_NAME, alert_address=gpc.config.alert_address): + try: + main(args) + except Exception: + logger.error( + f"Raise exception from {hostname} with rank id: {gpc.get_global_rank()}\n{traceback.format_exc()}", + ) + mm.monitor_exception(alert_address=gpc.config.alert_address, excp_info=traceback.format_exc()) diff --git a/web_demo.py b/web_demo.py index e9334cd..4091a98 100644 --- a/web_demo.py +++ b/web_demo.py @@ -1,23 +1,20 @@ """ -This script refers to the dialogue example of streamlit, the interactive generation code of chatglm2 and transformers. We mainly modified part of the code logic to adapt to the generation of our model. +This script refers to the dialogue example of streamlit, the interactive generation code of chatglm2 and transformers. +We mainly modified part of the code logic to adapt to the generation of our model. Please refer to these links below for more information: 1. streamlit chat example: https://docs.streamlit.io/knowledge-base/tutorials/build-conversational-apps 2. chatglm2: https://github.com/THUDM/ChatGLM2-6B 3. 
transformers: https://github.com/huggingface/transformers """ +from dataclasses import asdict + import streamlit as st import torch -from dataclasses import dataclass, asdict -from typing import List, Optional, Callable, Optional -import copy -import warnings -import logging from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.utils import logging -from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList -from tools.transformers.interface import generate_interactive, GenerationConfig +from tools.transformers.interface import GenerationConfig, generate_interactive logger = logging.get_logger(__name__) @@ -25,9 +22,14 @@ logger = logging.get_logger(__name__) def on_btn_click(): del st.session_state.messages + @st.cache_resource def load_model(): - model = AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True).to(torch.bfloat16).cuda() + model = ( + AutoModelForCausalLM.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) + .to(torch.bfloat16) + .cuda() + ) tokenizer = AutoTokenizer.from_pretrained("internlm/internlm-chat-7b", trust_remote_code=True) return model, tokenizer @@ -35,20 +37,12 @@ def load_model(): def prepare_generation_config(): with st.sidebar: max_length = st.slider("Max Length", min_value=32, max_value=2048, value=2048) - top_p = st.slider( - 'Top P', 0.0, 1.0, 0.8, step=0.01 - ) - temperature = st.slider( - 'Temperature', 0.0, 1.0, 0.7, step=0.01 - ) + top_p = st.slider("Top P", 0.0, 1.0, 0.8, step=0.01) + temperature = st.slider("Temperature", 0.0, 1.0, 0.7, step=0.01) st.button("Clear Chat History", on_click=on_btn_click) - - generation_config = GenerationConfig( - max_length=max_length, - top_p=top_p, - temperature=temperature - ) - + + generation_config = GenerationConfig(max_length=max_length, top_p=top_p, temperature=temperature) + return generation_config @@ -74,16 +68,16 @@ def combine_history(prompt): def main(): - #torch.cuda.empty_cache() + # torch.cuda.empty_cache() print("load model begin.") model, tokenizer = load_model() print("load model end.") - + user_avator = "doc/imgs/user.png" robot_avator = "doc/imgs/robot.png" - + st.title("InternLM-Chat-7B") - + generation_config = prepare_generation_config() # Initialize chat history @@ -106,22 +100,20 @@ def main(): with st.chat_message("robot", avatar=robot_avator): message_placeholder = st.empty() - for cur_response in generate_interactive(model=model, tokenizer=tokenizer, prompt=real_prompt, additional_eos_token_id=103028, **asdict(generation_config)): + for cur_response in generate_interactive( + model=model, + tokenizer=tokenizer, + prompt=real_prompt, + additional_eos_token_id=103028, + **asdict(generation_config), + ): # Display robot response in chat message container message_placeholder.markdown(cur_response + "▌") message_placeholder.markdown(cur_response) # Add robot response to chat history st.session_state.messages.append({"role": "robot", "content": cur_response, "avatar": robot_avator}) torch.cuda.empty_cache() - + if __name__ == "__main__": main() - - - - - - - - From 28635755f52adef113099856756fb1195dc9a8c6 Mon Sep 17 00:00:00 2001 From: YWMditto <46778265+YWMditto@users.noreply.github.com> Date: Sat, 26 Aug 2023 17:48:08 +0800 Subject: [PATCH 02/34] [fix bug] Fix the error that RotaryEmbedding is converted to a non-fp32 format during training, and add a compatible method for the llama model. 
(#239) Co-authored-by: YWMditto <862779238@qq.com> --- internlm/model/embedding.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/internlm/model/embedding.py b/internlm/model/embedding.py index 8c59aaf..d4ae9b5 100644 --- a/internlm/model/embedding.py +++ b/internlm/model/embedding.py @@ -137,15 +137,13 @@ class RotaryEmbedding(torch.nn.Module): """ """ super().__init__() # Generate and save the inverse frequency buffer (non trainable) - inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)) - self.register_buffer("inv_freq", inv_freq) + self.inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, device=device, dtype=torch.float32) / dim)) self.scale_base = scale_base - scale = ( + self.scale = ( (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim) if scale_base > 0 else None ) - self.register_buffer("scale", scale) self._seq_len_cached = 0 self._cos_cached = None @@ -220,3 +218,15 @@ class RotaryEmbedding(torch.nn.Module): self._cos_k_cached[seqlen_offset:], self._sin_k_cached[seqlen_offset:], ) + + def _single_forward(self, x, indexes=0): + assert self.scale is None + self._update_cos_sin_cache(x, indexes) + x = x[None, ...] + ret = legacy_apply_rotary_embed(x, self._cos_cached[indexes], self._sin_cached[indexes]).squeeze(0) + return ret + + def _single_eval_forward(self, x, seqlen_offset=0): + assert self.scale is None + self._update_cos_sin_cache(x, seqlen_offset + x.shape[1]) + return legacy_apply_rotary_embed(x, self._cos_cached[seqlen_offset:], self._sin_cached[seqlen_offset:]) From 4e1ddffcaf74bd705c8842b777a80d2ba9893460 Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Mon, 28 Aug 2023 11:23:08 +0800 Subject: [PATCH 03/34] feat(docs): update readme (#240) Co-authored-by: huangting4201 --- doc/en/structure.md | 5 ++++- doc/en/usage.md | 10 +++++++--- doc/structure.md | 5 ++++- doc/usage.md | 8 ++++++-- 4 files changed, 21 insertions(+), 7 deletions(-) diff --git a/doc/en/structure.md b/doc/en/structure.md index 75cb95f..5b50e93 100644 --- a/doc/en/structure.md +++ b/doc/en/structure.md @@ -6,11 +6,14 @@ The system code file structure is shown below: ├── internlm # Main directory of the system code │ ├── apis # Interface module, containing some interface functions related to inference, etc. 
│ ├── core # Core module, managing parallel context and training scheduling engine for training and inference +│ │ ├── communication # Communication module, responsible for p2p communication in pipeline parallel scheduling │ │ ├── context # Context module, mainly responsible for initializing parallel process groups and managing parallel context │ │ │ ├── parallel_context.py │ │ │ └── process_group_initializer.py +│ │ ├── scheduler # Scheduling module, which manages schedulers for parallel training, including non-pipeline and pipeline parallel schedulers +│ │ │ ├── no_pipeline_scheduler.py +│ │ │ └── pipeline_scheduler.py │ │ ├── engine.py # Responsible for managing the training and evaluation process of the model -│ │ ├── no_pipeline_scheduler.py # Scheduler for parallel training │ │ └── trainer.py # Responsible for managing the training engine and scheduler │ ├── data # Data module, responsible for managing dataset generation and processing │ ├── initialize # Initialization module, responsible for managing distributed environment startup and trainer initialization diff --git a/doc/en/usage.md b/doc/en/usage.md index 0f62ebc..e286edc 100644 --- a/doc/en/usage.md +++ b/doc/en/usage.md @@ -165,8 +165,9 @@ Training parallel configuration example: ```python parallel = dict( zero1=8, - pipeline=1, tensor=1, + pipeline=dict(size=1, interleaved_overlap=True), + sequence_parallel=False, ) ``` @@ -174,8 +175,11 @@ parallel = dict( - When `size <= 0`, the size of the zero1 process group is equal to the size of the data parallel process group, so the optimizer state parameters will be split within the data parallel range. - When `size == 1`, zero1 is not used, and all data parallel groups retain the complete optimizer state parameters. - When `size > 1` and `size <= data_parallel_world_size`, the zero1 process group is a subset of the data parallel process group. 
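As a rough illustration of how these sizes interact (a sketch only; the 32 GPU total below is a hypothetical value, not taken from this configuration), the process group arithmetic works out as follows:

```python
# Illustrative sketch: how zero1 relates to the data parallel group size.
# The total GPU count of 32 is hypothetical; the other values follow the
# example configuration above (zero1=8, tensor=1, pipeline size=1).
total_gpus = 32
tensor = 1
pipeline = 1
zero1 = 8

# Data parallel size = Total number of GPUs / Pipeline parallel size / Tensor parallel size
data_parallel = total_gpus // (pipeline * tensor)  # 32

# With 1 < zero1 <= data_parallel, optimizer states are sharded across
# groups of `zero1` ranks inside each data parallel group.
assert 1 < zero1 <= data_parallel
num_zero1_groups = data_parallel // zero1  # 4 groups of 8 ranks each

print(f"data parallel size: {data_parallel}, zero1 groups: {num_zero1_groups}")
```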
-- pipeline: pipeline parallel size, default value is 1
-- tensor: tensor parallel size, usually the number of GPUs per node, default value is 1
+- tensor: tensor parallel size, usually the number of GPUs per node, the default value is 1
+- pipeline: pipeline parallel strategy
+  - size: pipeline parallel size, the default value is 1
+  - interleaved_overlap: bool type, enables or disables communication optimization when using interleaved scheduling, the default value is False
+- sequence_parallel: whether to enable sequence parallelism, the default value is False

 Note: `Data parallel size = Total number of GPUs / Pipeline parallel size / Tensor parallel size`

diff --git a/doc/structure.md b/doc/structure.md
index e9fbe3a..2893438 100644
--- a/doc/structure.md
+++ b/doc/structure.md
@@ -6,11 +6,14 @@
 ├── internlm                # 系统代码的主目录
 │   ├── apis                # 接口模块,包含一些关于推理等的接口函数
 │   ├── core                # 核心模块,管理用于训练和推理的 parallel context 和训练调度引擎
+│   │   ├── communication   # 通信模块,负责流水线并行调度中的p2p通信
 │   │   ├── context         # context 模块,主要负责初始化并行进程组,并管理 parallel context
 │   │   │   ├── parallel_context.py
 │   │   │   └── process_group_initializer.py
+│   │   ├── scheduler       # 调度模块,管理并行训练的调度器,包括非流水线并行调度器和流水线并行调度器
+│   │   │   ├── no_pipeline_scheduler.py
+│   │   │   └── pipeline_scheduler.py
 │   │   ├── engine.py       # 负责管理模型的训练和评估过程
-│   │   ├── no_pipeline_scheduler.py  # 并行训练的调度器
 │   │   └── trainer.py      # 负责管理训练引擎和调度器
 │   ├── data                # 数据模块,负责管理数据集生成和处理
 │   ├── initialize          # 初始化模块,负责管理分布式环境启动和训练器初始化
diff --git a/doc/usage.md b/doc/usage.md
index 8c9a455..11a4394 100644
--- a/doc/usage.md
+++ b/doc/usage.md
@@ -151,16 +151,20 @@ model = dict(
 ```python
 parallel = dict(
     zero1=8,
-    pipeline=1,
     tensor=1,
+    pipeline=dict(size=1, interleaved_overlap=True),
+    sequence_parallel=False,
 )
 ```
 - zero1:zero 并行策略,分如下三种情况,默认值为 -1
   - 当`size <= 0`,则 zero1 进程组的大小等于数据并行进程组的大小,因此优化器状态参数将在数据并行范围内分配
   - 当`size == 1`,则不使用 zero1 ,所有数据并行组保留完整的优化器状态参数
   - 当`size > 1`且`size <= data_parallel_world_size`,则 zero1 进程组是数据并行进程组的子集
-- pipeline:流水线并行大小,默认值为 1
 - tensor:张量并行大小,通常是每个节点的 GPU 数量,默认值为 1
+- pipeline:流水线并行策略
+  - size:流水线并行大小,默认值为 1
+  - interleaved_overlap:bool 类型,交错式调度时,开启或关闭通信优化,默认值为关闭
+- sequence_parallel:是否开启序列并行,默认值为 False

 注意:`数据并行大小 = 总的 GPU 数目 / 流水线并行大小 / 张量并行大小`

From 992499d00d1ffac574523fb748a5a56baa7658ce Mon Sep 17 00:00:00 2001
From: huangting4201 <1538303371@qq.com>
Date: Tue, 29 Aug 2023 13:54:41 +0800
Subject: [PATCH 04/34] docs(doc/code-docs): support readthedocs (#245)

* feat(doc/code-docs): add code-docs for readthedocs
* feat(doc/code-docs): add .readthedocs.yaml configuration file
* feat(doc/code-docs): update .readthedocs.yaml configuration file
* feat(doc/code-docs): update .readthedocs.yaml configuration file
* feat(doc/code-docs): update .readthedocs.yaml configuration file
* feat(doc/code-docs): update .readthedocs.yaml configuration file
* feat(doc/code-docs): update code-docs
---
 .readthedocs.yml                    | 28 ++++++++++++
 doc/code-docs/Makefile              | 20 +++++++++
 doc/code-docs/make.bat              | 35 +++++++++++++++
 doc/code-docs/requirements.txt      |  6 +++
 doc/code-docs/source/checkpoint.rst |  2 +
 doc/code-docs/source/conf.py        | 62 +++++++++++++++++++++++++
 doc/code-docs/source/index.rst      | 70 +++++++++++++++++++++++++++++
 doc/code-docs/source/initialize.rst | 35 +++++++++++++++
 doc/code-docs/source/install.md     | 70 +++++++++++++++++++++++++++++
 doc/code-docs/source/monitor.rst    | 10 +++++
 doc/code-docs/source/parallel.rst   | 23 ++++++++++
 doc/code-docs/source/profiler.rst   | 11 +++++
 doc/code-docs/source/training.rst   |  2 +
 13 files changed, 374 insertions(+)
 create mode 100644 .readthedocs.yml
 create mode 100644 doc/code-docs/Makefile
 create
mode 100644 doc/code-docs/make.bat create mode 100644 doc/code-docs/requirements.txt create mode 100644 doc/code-docs/source/checkpoint.rst create mode 100644 doc/code-docs/source/conf.py create mode 100644 doc/code-docs/source/index.rst create mode 100644 doc/code-docs/source/initialize.rst create mode 100644 doc/code-docs/source/install.md create mode 100644 doc/code-docs/source/monitor.rst create mode 100644 doc/code-docs/source/parallel.rst create mode 100644 doc/code-docs/source/profiler.rst create mode 100644 doc/code-docs/source/training.rst diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..650ee88 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,28 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: doc/code-docs/source/conf.py + fail_on_warning: false + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: doc/code-docs/requirements.txt diff --git a/doc/code-docs/Makefile b/doc/code-docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/doc/code-docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/code-docs/make.bat b/doc/code-docs/make.bat new file mode 100644 index 0000000..747ffb7 --- /dev/null +++ b/doc/code-docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/code-docs/requirements.txt b/doc/code-docs/requirements.txt new file mode 100644 index 0000000..9a4bb3d --- /dev/null +++ b/doc/code-docs/requirements.txt @@ -0,0 +1,6 @@ +Sphinx +sphinx-autobuild +recommonmark +sphinx_rtd_theme +sphinx_markdown_tables +autodoc_pydantic==1.9 \ No newline at end of file diff --git a/doc/code-docs/source/checkpoint.rst b/doc/code-docs/source/checkpoint.rst new file mode 100644 index 0000000..3ceed08 --- /dev/null +++ b/doc/code-docs/source/checkpoint.rst @@ -0,0 +1,2 @@ +Model Checkpointing +=================== \ No newline at end of file diff --git a/doc/code-docs/source/conf.py b/doc/code-docs/source/conf.py new file mode 100644 index 0000000..5986f06 --- /dev/null +++ b/doc/code-docs/source/conf.py @@ -0,0 +1,62 @@ +# Configuration file for the Sphinx documentation builder. +# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +import os +import sys + +project = "InternLM" +copyright = "2023, InternLM Team" +author = "InternLM Team" +release = "v0.2.0" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "recommonmark", + "sphinx_rtd_theme", + "sphinx.ext.viewcode", + "sphinx.ext.autodoc", + "sphinxcontrib.autodoc_pydantic", + "sphinx.ext.autosectionlabel", + "sphinx.ext.napoleon", +] + +pygments_style = "sphinx" + +# autodoc_pyandtic config +autodoc_pydantic_model_show_field_summary = False +autodoc_pydantic_field_signature_prefix = " " +autodoc_pydantic_model_signature_prefix = "class" +autodoc_pydantic_model_show_json = False +autodoc_pydantic_model_show_config_summary = False +autodoc_pydantic_model_show_config_member = False +autodoc_pydantic_model_show_validator_summary = False +autodoc_pydantic_model_show_validator_members = False +autodoc_pydantic_model_summary_list_order = "bysource" +autodoc_pydantic_model_member_order = "bysource" +autodoc_pydantic_field_list_validators = False + +templates_path = ["_templates"] + +exclude_patterns = [] + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] + +sys.path.insert(0, os.path.abspath("../../../")) + +# Prepend module names to class descriptions +add_module_names = True + +autoclass_content = "init" + +autodoc_mock_imports = ["apex", "torch"] diff --git a/doc/code-docs/source/index.rst b/doc/code-docs/source/index.rst new file mode 100644 index 0000000..3011df6 --- /dev/null +++ b/doc/code-docs/source/index.rst @@ -0,0 +1,70 @@ +.. InternLM documentation master file, created by + sphinx-quickstart on Mon Aug 28 17:33:28 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +InternLM +======== + +Environment Setup +------------------- + +.. 
toctree:: + :maxdepth: 2 + + install + +Model Setup +------------------- + +.. toctree:: + :maxdepth: 2 + + initialize + +Training API +------------------- + +.. toctree:: + :maxdepth: 2 + + training + +Parallel Training +------------------- + +.. toctree:: + :maxdepth: 2 + + parallel + +Model Checkpointing +------------------- + +.. toctree:: + :maxdepth: 2 + + checkpoint + +Profiler +------------------- + +.. toctree:: + :maxdepth: 2 + + profiler + +Monitor +------------------- + +.. toctree:: + :maxdepth: 2 + + monitor + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/code-docs/source/initialize.rst b/doc/code-docs/source/initialize.rst new file mode 100644 index 0000000..a638c33 --- /dev/null +++ b/doc/code-docs/source/initialize.rst @@ -0,0 +1,35 @@ +Training Setup +============== + +.. _InternLM-args: + +Argument Parsing +---------------- +InternLM uses the `argparse `_ library to supply commandline +configuration to the InternLM runtime. Use ``internlm.initialize.get_default_parser()`` to get InternLM's default +parser with some builtin arguments, users can add custom parameters to this parser. + +.. code-block:: python + + # Get InternLM default parser + parser = internlm.initialize.get_default_parser() + # Add new argument + parser.add_argument("--user_arg", type=int, default=-1, help="arguments add by user.") + cmd_args = parser.parse_args() + +.. autofunction:: internlm.initialize.get_default_parser + + +.. _InternLM-init: + +Model Initialization +------------------------- + +Optimizer Initialization +------------------------- + +Dataloader Initialization +------------------------- + +Trainer Initialization +------------------------- diff --git a/doc/code-docs/source/install.md b/doc/code-docs/source/install.md new file mode 100644 index 0000000..26f57c0 --- /dev/null +++ b/doc/code-docs/source/install.md @@ -0,0 +1,70 @@ +## Installation + +### Environment Preparation +The required packages and corresponding version are shown as follows: +- Python == 3.10 +- GCC == 10.2.0 +- MPFR == 4.1.0 +- CUDA >= 11.7 +- Pytorch >= 1.13.1 +- Transformers >= 4.28.0 +- Flash-Attention >= v1.0.5 +- Apex == 23.05 +- GPU with Ampere or Hopper architecture (such as H100, A100) +- Linux OS + +After installing the above dependencies, some system environment variables need to be updated: +```bash +export CUDA_PATH={path_of_cuda_11.7} +export GCC_HOME={path_of_gcc_10.2.0} +export MPFR_HOME={path_of_mpfr_4.1.0} +export LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +export PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +export CC=${GCC_HOME}/bin/gcc +export CXX=${GCC_HOME}/bin/c++ +``` + +### Environment Installation +Clone the project `internlm` and its dependent submodules from the github repository, as follows: +```bash +git clone git@github.com:InternLM/InternLM.git --recurse-submodules +``` + +It is recommended to build a Python-3.10 virtual environment using conda and install the required dependencies based on the `requirements/` files: +```bash +conda create --name internlm-env python=3.10 -y +conda activate internlm-env +cd internlm +pip install -r requirements/torch.txt +pip install -r requirements/runtime.txt +``` + +Install flash-attention (version v1.0.5): +```bash +cd ./third_party/flash-attention +python setup.py install +cd ./csrc +cd fused_dense_lib && pip install -v . +cd ../xentropy && pip install -v . +cd ../rotary && pip install -v . +cd ../layer_norm && pip install -v . 
+cd ../../../../ +``` + +Install Apex (version 23.05): +```bash +cd ./third_party/apex +pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ +cd ../../ +``` + +### Environment Image +Users can obtain an image with the InternLM runtime environment installed from https://hub.docker.com/r/sunpengsdu/internlm. The commands for pulling the image and starting the container are as follows: + +```bash +# pull image +docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos +# start container +docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos +docker exec -it myinternlm bash +``` diff --git a/doc/code-docs/source/monitor.rst b/doc/code-docs/source/monitor.rst new file mode 100644 index 0000000..ff8cd1b --- /dev/null +++ b/doc/code-docs/source/monitor.rst @@ -0,0 +1,10 @@ +Monitor and Alert +================= + + +Monitoring +----------------- + + +Alerting +----------------- diff --git a/doc/code-docs/source/parallel.rst b/doc/code-docs/source/parallel.rst new file mode 100644 index 0000000..3515847 --- /dev/null +++ b/doc/code-docs/source/parallel.rst @@ -0,0 +1,23 @@ +Parallel Training +================= + +.. 整体说一下并行配置使用方式,接下来再分模块详细说明 + +Tensor Parallel +----------------- + + +Pipeline Parallel +----------------- + + +Sequence Parallel +----------------- + + +Data Parallel +----------------- + + +ZeRO1.5 +----------------- \ No newline at end of file diff --git a/doc/code-docs/source/profiler.rst b/doc/code-docs/source/profiler.rst new file mode 100644 index 0000000..c10f425 --- /dev/null +++ b/doc/code-docs/source/profiler.rst @@ -0,0 +1,11 @@ +Profiler +======== + +.. 可介绍torch profiler, memory profiler的使用 + +Torch Profiler +----------------- + + +Memory Profiler +----------------- \ No newline at end of file diff --git a/doc/code-docs/source/training.rst b/doc/code-docs/source/training.rst new file mode 100644 index 0000000..e9ee124 --- /dev/null +++ b/doc/code-docs/source/training.rst @@ -0,0 +1,2 @@ +Training API +============ \ No newline at end of file From fc4b8918c420a0915c339028ffc108a8a8ddfb6d Mon Sep 17 00:00:00 2001 From: li126com <43110891+li126com@users.noreply.github.com> Date: Tue, 29 Aug 2023 16:14:59 +0800 Subject: [PATCH 05/34] Standard and experiment docker (#220) * feat:standard docker image * feat:standard docker image * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * experiment and standard docker * experiment and standard docker --- doc/en/install.md | 28 ++++-- doc/install.md | 29 +++++-- docker.Makefile | 107 +++++++++++++++++++++++ docker/Dockerfile-centos | 131 ++++++++++++++++++++++++++++ docker/Dockerfile-ubuntu | 112 ++++++++++++++++++++++++ experiment/Dockerfile-centos | 161 +++++++++++++++++++++++++++++++++++ experiment/Dockerfile-ubuntu | 142 ++++++++++++++++++++++++++++++ experiment/README-CN.md | 25 ++++++ experiment/README-EN.md | 25 ++++++ 9 files changed, 748 insertions(+), 12 deletions(-) create mode 100644 docker.Makefile create mode 100644 docker/Dockerfile-centos create mode 100644 docker/Dockerfile-ubuntu create mode 100644 experiment/Dockerfile-centos create mode 100644 experiment/Dockerfile-ubuntu create mode 100644 experiment/README-CN.md create mode 100644 experiment/README-EN.md diff --git a/doc/en/install.md b/doc/en/install.md index 
0c721b9..591cb5d 100644 --- a/doc/en/install.md +++ b/doc/en/install.md @@ -59,12 +59,28 @@ cd ../../ ``` ### Environment Image -Users can obtain an image with the InternLM runtime environment installed from https://hub.docker.com/r/sunpengsdu/internlm. The commands for pulling the image and starting the container are as follows: +Users can use the provided dockerfile combined with docker.Makefile to build their own images, or obtain images with InternLM runtime environment installed from https://hub.docker.com/r/internlm/internlm. + +#### Image Configuration and Build +The configuration and build of the Dockerfile are implemented through the docker.Makefile. To build the image, execute the following command in the root directory of InternLM: +``` bash +make -f docker.Makefile BASE_OS=centos7 +``` +In docker.Makefile, you can customize the basic image, environment version, etc., and the corresponding parameters can be passed directly through the command line. For BASE_OS, ubuntu20.04 and centos7 are respectively supported. + +#### Pull Standard Image +The standard image based on ubuntu and centos has been built and can be directly pulled: ```bash -# pull image -docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -# start container -docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -docker exec -it myinternlm bash +# ubuntu20.04 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-ubuntu20.04 +# centos7 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 ``` + +#### Run Container +For the local standard image built with dockerfile or pulled, use the following command to run and enter the container: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 bash +``` +The default directory in the container is `/InternLM`, please start training according to the [Usage](./usage.md). 
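A minimal sanity check (illustrative only, not part of this patch) can be run inside the started container to confirm that the runtime matches the versions named in the image tag above:

```python
# Illustrative sanity check, assuming the container started above. It verifies that
# the runtime roughly matches the image tag (torch1.13.1-cuda11.7.1-flashatten1.0.5).
import torch

print("torch :", torch.__version__)       # expected 1.13.1
print("cuda  :", torch.version.cuda)      # expected 11.7
print("gpu ok:", torch.cuda.is_available())

for pkg in ("flash_attn", "apex"):        # both are built from third_party/ during the image build
    try:
        mod = __import__(pkg)
        print(pkg, ":", getattr(mod, "__version__", "installed"))
    except ImportError:
        print(pkg, ": missing")
```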
diff --git a/doc/install.md b/doc/install.md index d5e547e..97578d6 100644 --- a/doc/install.md +++ b/doc/install.md @@ -59,11 +59,28 @@ cd ../../ ``` ### 环境镜像 -用户可以从 https://hub.docker.com/r/sunpengsdu/internlm 获取安装了 InternLM 运行环境的镜像,拉取镜像及启动容器的命令如下: +用户可以使用提供的 dockerfile 结合 docker.Makefile 来构建自己的镜像,或者也可以从 https://hub.docker.com/r/internlm/internlm 获取安装了 InternLM 运行环境的镜像。 + +#### 镜像配置及构造 +dockerfile 的配置以及构造均通过 docker.Makefile 文件实现,在 InternLM 根目录下执行如下命令即可 build 镜像: +``` bash +make -f docker.Makefile BASE_OS=centos7 +``` +在 docker.Makefile 中可自定义基础镜像,环境版本等内容,对应参数可直接通过命令行传递。对于 BASE_OS 分别支持 ubuntu20.04 和 centos7。 + +#### 镜像拉取 +基于 ubuntu 和 centos 的标准镜像已经 build 完成也可直接拉取使用: + ```bash -# 拉取镜像 -docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -# 启动容器 -docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -docker exec -it myinternlm bash +# ubuntu20.04 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-ubuntu20.04 +# centos7 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 ``` + +#### 容器启动 +对于使用 dockerfile 构建或拉取的本地标准镜像,使用如下命令启动并进入容器: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 bash +``` +容器内默认目录即 `/InternLM`,根据[使用文档](./usage.md)即可启动训练。 diff --git a/docker.Makefile b/docker.Makefile new file mode 100644 index 0000000..21ce55a --- /dev/null +++ b/docker.Makefile @@ -0,0 +1,107 @@ +DOCKER_REGISTRY ?= docker.io +DOCKER_ORG ?= my +DOCKER_IMAGE ?= internlm +DOCKER_FULL_NAME = $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(DOCKER_IMAGE) + +CUDA_VERSION = 11.7.1 +GCC_VERSION = 10.2.0 + +CUDNN_VERSION = 8 +BASE_RUNTIME = +# ubuntu20.04 centos7 +BASE_OS = centos7 +BASE_DEVEL = nvidia/cuda:$(CUDA_VERSION)-cudnn$(CUDNN_VERSION)-devel-${BASE_OS} +# The conda channel to use to install cudatoolkit +CUDA_CHANNEL = nvidia +# The conda channel to use to install pytorch / torchvision +INSTALL_CHANNEL ?= pytorch + +PYTHON_VERSION ?= 3.10 +PYTORCH_VERSION ?= 1.13.1 +TORCHVISION_VERSION ?= 0.14.1 +TORCHAUDIO_VERSION ?= 0.13.1 +BUILD_PROGRESS ?= auto +TRITON_VERSION ?= +GMP_VERSION ?= 6.2.1 +MPFR_VERSION ?= 4.1.0 +MPC_VERSION ?= 1.2.1 +GCC_VERSION ?= 10.2.0 +HTTPS_PROXY_I ?= +HTTP_PROXY_I ?= +FLASH_ATTEN_VERSION ?= 1.0.5 +FLASH_ATTEN_TAG ?= v${FLASH_ATTEN_VERSION} + +BUILD_ARGS = --build-arg BASE_IMAGE=$(BASE_IMAGE) \ + --build-arg PYTHON_VERSION=$(PYTHON_VERSION) \ + --build-arg CUDA_VERSION=$(CUDA_VERSION) \ + --build-arg CUDA_CHANNEL=$(CUDA_CHANNEL) \ + --build-arg PYTORCH_VERSION=$(PYTORCH_VERSION) \ + --build-arg TORCHVISION_VERSION=$(TORCHVISION_VERSION) \ + --build-arg TORCHAUDIO_VERSION=$(TORCHAUDIO_VERSION) \ + --build-arg INSTALL_CHANNEL=$(INSTALL_CHANNEL) \ + --build-arg TRITON_VERSION=$(TRITON_VERSION) \ + --build-arg GMP_VERSION=$(GMP_VERSION) \ + --build-arg MPFR_VERSION=$(MPFR_VERSION) \ + --build-arg MPC_VERSION=$(MPC_VERSION) \ + --build-arg GCC_VERSION=$(GCC_VERSION) \ + --build-arg https_proxy=$(HTTPS_PROXY_I) \ + --build-arg http_proxy=$(HTTP_PROXY_I) \ + --build-arg FLASH_ATTEN_TAG=$(FLASH_ATTEN_TAG) + +EXTRA_DOCKER_BUILD_FLAGS ?= + +BUILD ?= build +# Intentionally left blank +PLATFORMS_FLAG ?= +PUSH_FLAG ?= +USE_BUILDX ?=1 +BUILD_PLATFORMS ?= +WITH_PUSH ?= false +BUILD_TYPE ?= intrenlm-dev + +# Setup buildx flags +ifneq ("$(USE_BUILDX)","") +BUILD = buildx build +ifneq ("$(BUILD_PLATFORMS)","") +PLATFORMS_FLAG = 
--platform="$(BUILD_PLATFORMS)" +endif +endif +# endif + +# # Only set platforms flags if using buildx +# ifeq ("$(WITH_PUSH)","true") +# PUSH_FLAG = --push +# endif +# endif + +ifeq ($(findstring centos,$(BASE_OS)),centos) + DOCKERFILE_PATH ?= ./docker/Dockerfile-centos +else + DOCKERFILE_PATH ?= ./docker/Dockerfile-ubuntu +endif + +#use -f to specify dockerfile +DOCKER_BUILD = DOCKER_BUILDKIT=1 \ + docker $(BUILD) \ + --progress=$(BUILD_PROGRESS) \ + $(EXTRA_DOCKER_BUILD_FLAGS) \ + $(PLATFORMS_FLAG) \ + $(PUSH_FLAG) \ + -f $(DOCKERFILE_PATH) \ + -t $(DOCKER_FULL_NAME):$(DOCKER_TAG) \ + $(BUILD_ARGS) . + + # --target $(BUILD_TYPE) + +.PHONY: all +all: devel-image + +.PHONY: devel-image +devel-image: BASE_IMAGE := $(BASE_DEVEL) +devel-image: DOCKER_TAG := torch${PYTORCH_VERSION}-cuda${CUDA_VERSION}-flashatten${FLASH_ATTEN_VERSION}-${BASE_OS} +devel-image: + $(DOCKER_BUILD) + +.PHONY: clean +clean: + -docker rmi -f $(shell docker images -q $(DOCKER_FULL_NAME)) \ No newline at end of file diff --git a/docker/Dockerfile-centos b/docker/Dockerfile-centos new file mode 100644 index 0000000..917d28f --- /dev/null +++ b/docker/Dockerfile-centos @@ -0,0 +1,131 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on centos +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN yum install deltarpm -y && yum update -y \ + && yum install -y \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + bzip2 \ + gcc \ + gcc-c++ \ + file \ + texinfo \ + which + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. 
\ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && git clone https://github.com/ninja-build/ninja.git \ + && cd ninja \ + && git checkout release \ + && ./configure.py --bootstrap \ + && mv ./ninja /usr/bin \ + && cd .. + +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${MPFR_HOME}/lib:$LD_LIBRARY_PATH + +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-threads=posix --disable-checking --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +RUN git submodule update --init --recursive \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/torch.txt \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/runtime.txt \ + && cd /InternLM/third_party/flash-attention \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . 
\ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/docker/Dockerfile-ubuntu b/docker/Dockerfile-ubuntu new file mode 100644 index 0000000..e73421a --- /dev/null +++ b/docker/Dockerfile-ubuntu @@ -0,0 +1,112 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on ubuntu +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + ninja-build + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. 
\ + && wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxJf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-checking=release --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +RUN git submodule update --init --recursive \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/torch.txt \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/runtime.txt \ + && cd /InternLM/third_party/flash-attention \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . \ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/Dockerfile-centos b/experiment/Dockerfile-centos new file mode 100644 index 0000000..31ffc19 --- /dev/null +++ b/experiment/Dockerfile-centos @@ -0,0 +1,161 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on centos +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN yum install deltarpm -y && yum update -y \ + && yum install -y \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + bzip2 \ + gcc \ + gcc-c++ \ + file \ + texinfo \ + which + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# 
Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && git clone https://github.com/ninja-build/ninja.git \ + && cd ninja \ + && git checkout release \ + && ./configure.py --bootstrap \ + && mv ./ninja /usr/bin \ + && cd .. + +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${MPFR_HOME}/lib:$LD_LIBRARY_PATH + +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-threads=posix --disable-checking --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG PYTORCH_VERSION +ARG TORCHVISION_VERSION +ARG TORCHAUDIO_VERSION + +RUN /opt/conda/bin/pip --no-cache-dir install \ + transformers==4.29.2 \ + sentencepiece \ + numpy \ + tqdm \ + psutil \ + packaging \ + pre-commit \ + ninja \ + gputil \ + pytest \ + packaging \ + boto3 \ + botocore \ + torch-scatter \ + pyecharts \ + -f https://data.pyg.org/whl/torch-${PYTORCH_VERSION}+cu117.html \ + && /opt/conda/bin/pip --no-cache-dir install \ + --extra-index-url https://download.pytorch.org/whl/cu117 \ + torch==${PYTORCH_VERSION}+cu117 \ + torchvision==${TORCHVISION_VERSION}+cu117 \ + torchaudio==${TORCHAUDIO_VERSION} + +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +ARG FLASH_ATTEN_TAG + +RUN git submodule update --init --recursive \ + && cd /InternLM/third_party/flash-attention \ + && git checkout ${FLASH_ATTEN_TAG} \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . 
\ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . \ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/Dockerfile-ubuntu b/experiment/Dockerfile-ubuntu new file mode 100644 index 0000000..230a3b5 --- /dev/null +++ b/experiment/Dockerfile-ubuntu @@ -0,0 +1,142 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on ubuntu +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + ninja-build + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. 
\ + && wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxJf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-checking=release --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG PYTORCH_VERSION +ARG TORCHVISION_VERSION +ARG TORCHAUDIO_VERSION + +RUN /opt/conda/bin/pip --no-cache-dir install \ + transformers==4.29.2 \ + sentencepiece \ + numpy \ + tqdm \ + psutil \ + packaging \ + pre-commit \ + ninja \ + gputil \ + pytest \ + packaging \ + boto3 \ + botocore \ + torch-scatter \ + pyecharts \ + -f https://data.pyg.org/whl/torch-${PYTORCH_VERSION}+cu117.html \ + && /opt/conda/bin/pip --no-cache-dir install \ + --extra-index-url https://download.pytorch.org/whl/cu117 \ + torch==${PYTORCH_VERSION}+cu117 \ + torchvision==${TORCHVISION_VERSION}+cu117 \ + torchaudio==${TORCHAUDIO_VERSION} + +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +ARG FLASH_ATTEN_TAG + +RUN git submodule update --init --recursive \ + && cd /InternLM/third_party/flash-attention \ + && git checkout ${FLASH_ATTEN_TAG} \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . 
\ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/README-CN.md b/experiment/README-CN.md new file mode 100644 index 0000000..1f96cc7 --- /dev/null +++ b/experiment/README-CN.md @@ -0,0 +1,25 @@ +## 实验性环境镜像 +本模块用于测试新版本环境,默认测试新环境 torch=2.0.1,flash-attention=2.1.0。新环境可能具有不稳定性,标准环境安装请参考:[安装文档](../doc/install.md) + +### 镜像构建及拉取 +构建镜像时请于 InternLM 根目录下执行 docker.Makefile,该文件与标准环境镜像共用,所使用的 Dockerfile 位于 experiment 目录下。也可直接从 https://hub.docker.com/r/internlm/internlm 拉取镜像,命令如下: +```bash +# 构建镜像 +# ubuntu20.04 +make -f docker.Makefile BASE_OS=ubuntu20.04 DOCKERFILE_PATH=./experiment/Dockerfile-ubuntu PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 +# centos7 +make -f docker.Makefile BASE_OS=centos7 DOCKERFILE_PATH=./experiment/Dockerfile-centos PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 + +# 拉取镜像 +# ubuntu20.04 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-ubuntu20.04 +# centos7 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 +``` + +### 容器启动 +对于使用 dockerfile 构建或拉取的本地标准镜像,使用如下命令启动并进入容器: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 bash +``` +容器内默认目录即 `/InternLM`,根据[使用文档](../doc/usage.md)即可启动训练。 \ No newline at end of file diff --git a/experiment/README-EN.md b/experiment/README-EN.md new file mode 100644 index 0000000..f9bae2b --- /dev/null +++ b/experiment/README-EN.md @@ -0,0 +1,25 @@ +## Environment Image for experiment +This module is used to test the new version environment, the default test new environment is torch=2.0.1, flash-attention=2.1.0. The new environment may be unstable, for the standard environment installation please refer to: [installation guide](../doc/en/install.md) + +### Build and Pull Image +When building the image, please make docker.Makefile in the InternLM root directory. This Makefile is shared with the standard environment image, and the Dockerfile used is located in the experiment directory. 
You can also pull the image directly from https://hub.docker.com/r/internlm/internlm, the command is as follows: +```bash +# Build Image +# ubuntu20.04 +make -f docker.Makefile BASE_OS=ubuntu20.04 DOCKERFILE_PATH=./experiment/Dockerfile-ubuntu PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 +# centos7 +make -f docker.Makefile BASE_OS=centos7 DOCKERFILE_PATH=./experiment/Dockerfile-centos PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 + +# Pull Image +# ubuntu20.04 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-ubuntu20.04 +# centos7 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 +``` + +### Run Container +For the local standard image built with dockerfile or pulled, use the following command to run and enter the container: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 bash +``` +The default directory in the container is `/InternLM`, please start training according to the [Usage](../doc/en/usage.md). \ No newline at end of file From b84d937478afeff052d089162d551d9d5c1373f6 Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Tue, 29 Aug 2023 18:47:21 +0800 Subject: [PATCH 06/34] fix(core/trainer.py): fix streaming train state load error (#247) --- internlm/core/trainer.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/internlm/core/trainer.py b/internlm/core/trainer.py index a027fed..2839ad9 100644 --- a/internlm/core/trainer.py +++ b/internlm/core/trainer.py @@ -78,8 +78,9 @@ class TrainState: self.step_count = other_stuffs.get("step_count", other_stuffs["batch_count"]) + 1 # track the actual updates of sampler when using weighted sampling - self.batch_sampler = train_dl.batch_sampler.copy() - self.batch_sampler_iter = iter(self.batch_sampler) + if hasattr(self, "batch_sampler"): + self.batch_sampler = train_dl.batch_sampler.copy() + self.batch_sampler_iter = iter(self.batch_sampler) # resume tensorboard from older tensorboard_folder self.resume_tb_folder = other_stuffs.get("tensorboard_folder", None) From c70819af92d5f0d40f5ecd5be1f86c1f5597010b Mon Sep 17 00:00:00 2001 From: li126com <43110891+li126com@users.noreply.github.com> Date: Thu, 31 Aug 2023 15:28:46 +0800 Subject: [PATCH 07/34] Fix requirement (#243) * feat:standard docker image * feat:standard docker image * fix: a little problem * fix: a little problem --- requirements/runtime.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements/runtime.txt b/requirements/runtime.txt index c0b345f..f46d7ad 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -13,4 +13,4 @@ boto3 botocore torch-scatter pyecharts --f https://data.pyg.org/whl/torch-1.13.0+cu117.html \ No newline at end of file +-f https://data.pyg.org/whl/torch-1.13.1+cu117.html \ No newline at end of file From ba4deaeffa676d22636a314ce65b3a08bc18bb0c Mon Sep 17 00:00:00 2001 From: Shuo Zhang Date: Thu, 31 Aug 2023 15:29:04 +0800 Subject: [PATCH 08/34] fix(eval): StreamingDataset does not have an __len__ method. 
(#251) --- internlm/utils/evaluation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internlm/utils/evaluation.py b/internlm/utils/evaluation.py index f1b2a20..872ef87 100644 --- a/internlm/utils/evaluation.py +++ b/internlm/utils/evaluation.py @@ -76,7 +76,7 @@ def evaluate_on_val_dls( data_cfg = gpc.config.data for val_name, val_dl in val_dls.items(): - if len(val_dl) == 0 and verbose and not streaming: + if not streaming and len(val_dl) == 0 and verbose: logger.info(f"Validation dataset: {val_name} is empty") continue From c92aa06bd8a35e203cb5ed0db1712d583ad8b38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ryan=20=28=E5=BC=A0=E7=A3=8A=29?= Date: Thu, 31 Aug 2023 17:44:39 +0800 Subject: [PATCH 09/34] fix(metric): argument missing in getting loss metrics. (#256) --- internlm/model/metrics.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internlm/model/metrics.py b/internlm/model/metrics.py index 1749aa2..24ce592 100644 --- a/internlm/model/metrics.py +++ b/internlm/model/metrics.py @@ -176,7 +176,7 @@ class AccPerplex: res.update(ds_acc) res.update(ds_tokens) - loss_res = self.loss_with_type_id.get_metric() + loss_res = self.loss_with_type_id.get_metric(reset) res.update(loss_res) return res From f79586b0c61a7651f76f78ee489533abfa94922c Mon Sep 17 00:00:00 2001 From: Pryest <54388244+Pryest@users.noreply.github.com> Date: Fri, 1 Sep 2023 01:12:53 +0800 Subject: [PATCH 10/34] feat(model): implement uniform_init for tensor. (#252) * Implement uniform_init for tensor. * Fix functinal calling bugs: normal->uniform. * Format editting: remove unused torch importing. --- internlm/initialize/initialize_tensor.py | 35 ++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 3 deletions(-) diff --git a/internlm/initialize/initialize_tensor.py b/internlm/initialize/initialize_tensor.py index 2580ca0..b317f26 100644 --- a/internlm/initialize/initialize_tensor.py +++ b/internlm/initialize/initialize_tensor.py @@ -3,16 +3,15 @@ import math -import torch from torch import Tensor, nn -def scaled_init_method_normal(sigma, num_layers): +def scaled_init_method_normal(sigma: float = 1.0, num_layers: int = 1): """Init method based on N(0, sigma/sqrt(2*num_layers).""" std = sigma / math.sqrt(2.0 * num_layers) def init_(tensor): - return torch.nn.init.normal_(tensor, mean=0.0, std=std) + return nn.init.normal_(tensor, mean=0.0, std=std) return init_ @@ -32,3 +31,33 @@ def normal_(mean: float = 0.0, std: float = 1.0): return nn.init.normal_(tensor, mean, std) return initializer + + +def scaled_init_method_uniform(sigma: float = 1.0, num_layers: int = 1): + """Init method based on p(x)=Uniform(-a, a) where std(x)=sigma/sqrt(2*num_layers).""" + std = sigma / math.sqrt(2.0 * num_layers) + a = math.sqrt(3.0 * std) + + def init_(tensor): + return nn.init.uniform_(tensor, -a, a) + + return init_ + + +def uniform_(mean: float = 0.0, std: float = 1.0): + r"""Return the initializer filling the input Tensor with values drawn from the uniform distribution + + .. math:: + \mathcal{U}(mean-a, mean+a), where a satisfies \mathcal{U}_{std}=std. + + Args: + mean (float): the mean of the uniform distribution. Defaults 0.0. + std (float): the standard deviation of the uniform distribution. Defaults 1.0. 
+ """ + + a = math.sqrt(3.0 * std) + + def initializer(tensor: Tensor): + return nn.init.uniform_(tensor, mean - a, mean + a) + + return initializer From fca1df20aebfc69560e23577cbd1e020e61e4691 Mon Sep 17 00:00:00 2001 From: Sun Peng Date: Fri, 1 Sep 2023 10:23:35 +0800 Subject: [PATCH 11/34] [Daily Pull] Merge Main to Develop 20230901 (#260) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Standard and experiment docker (#220) * feat:standard docker image * feat:standard docker image * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * experiment and standard docker * experiment and standard docker * fix(core/trainer.py): fix streaming train state load error (#247) * Fix requirement (#243) * feat:standard docker image * feat:standard docker image * fix: a little problem * fix: a little problem * fix(eval): StreamingDataset does not have an __len__ method. (#251) * fix(metric): argument missing in getting loss metrics. (#256) * feat(model): implement uniform_init for tensor. (#252) * Implement uniform_init for tensor. * Fix functinal calling bugs: normal->uniform. * Format editting: remove unused torch importing. --------- Co-authored-by: li126com <43110891+li126com@users.noreply.github.com> Co-authored-by: huangting4201 <1538303371@qq.com> Co-authored-by: Shuo Zhang Co-authored-by: Ryan (张磊) Co-authored-by: Pryest <54388244+Pryest@users.noreply.github.com> --- doc/en/install.md | 28 +++- doc/install.md | 29 +++- docker.Makefile | 107 +++++++++++++++ docker/Dockerfile-centos | 131 ++++++++++++++++++ docker/Dockerfile-ubuntu | 112 ++++++++++++++++ experiment/Dockerfile-centos | 161 +++++++++++++++++++++++ experiment/Dockerfile-ubuntu | 142 ++++++++++++++++++++ experiment/README-CN.md | 25 ++++ experiment/README-EN.md | 25 ++++ internlm/core/trainer.py | 5 +- internlm/initialize/initialize_tensor.py | 35 ++++- internlm/model/metrics.py | 2 +- internlm/utils/evaluation.py | 2 +- requirements/runtime.txt | 2 +- 14 files changed, 786 insertions(+), 20 deletions(-) create mode 100644 docker.Makefile create mode 100644 docker/Dockerfile-centos create mode 100644 docker/Dockerfile-ubuntu create mode 100644 experiment/Dockerfile-centos create mode 100644 experiment/Dockerfile-ubuntu create mode 100644 experiment/README-CN.md create mode 100644 experiment/README-EN.md diff --git a/doc/en/install.md b/doc/en/install.md index 0c721b9..591cb5d 100644 --- a/doc/en/install.md +++ b/doc/en/install.md @@ -59,12 +59,28 @@ cd ../../ ``` ### Environment Image -Users can obtain an image with the InternLM runtime environment installed from https://hub.docker.com/r/sunpengsdu/internlm. The commands for pulling the image and starting the container are as follows: +Users can use the provided dockerfile combined with docker.Makefile to build their own images, or obtain images with InternLM runtime environment installed from https://hub.docker.com/r/internlm/internlm. + +#### Image Configuration and Build +The configuration and build of the Dockerfile are implemented through the docker.Makefile. To build the image, execute the following command in the root directory of InternLM: +``` bash +make -f docker.Makefile BASE_OS=centos7 +``` +In docker.Makefile, you can customize the basic image, environment version, etc., and the corresponding parameters can be passed directly through the command line. 
For BASE_OS, ubuntu20.04 and centos7 are respectively supported. + +#### Pull Standard Image +The standard image based on ubuntu and centos has been built and can be directly pulled: ```bash -# pull image -docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -# start container -docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -docker exec -it myinternlm bash +# ubuntu20.04 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-ubuntu20.04 +# centos7 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 ``` + +#### Run Container +For the local standard image built with dockerfile or pulled, use the following command to run and enter the container: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 bash +``` +The default directory in the container is `/InternLM`, please start training according to the [Usage](./usage.md). diff --git a/doc/install.md b/doc/install.md index d5e547e..97578d6 100644 --- a/doc/install.md +++ b/doc/install.md @@ -59,11 +59,28 @@ cd ../../ ``` ### 环境镜像 -用户可以从 https://hub.docker.com/r/sunpengsdu/internlm 获取安装了 InternLM 运行环境的镜像,拉取镜像及启动容器的命令如下: +用户可以使用提供的 dockerfile 结合 docker.Makefile 来构建自己的镜像,或者也可以从 https://hub.docker.com/r/internlm/internlm 获取安装了 InternLM 运行环境的镜像。 + +#### 镜像配置及构造 +dockerfile 的配置以及构造均通过 docker.Makefile 文件实现,在 InternLM 根目录下执行如下命令即可 build 镜像: +``` bash +make -f docker.Makefile BASE_OS=centos7 +``` +在 docker.Makefile 中可自定义基础镜像,环境版本等内容,对应参数可直接通过命令行传递。对于 BASE_OS 分别支持 ubuntu20.04 和 centos7。 + +#### 镜像拉取 +基于 ubuntu 和 centos 的标准镜像已经 build 完成也可直接拉取使用: + ```bash -# 拉取镜像 -docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -# 启动容器 -docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -docker exec -it myinternlm bash +# ubuntu20.04 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-ubuntu20.04 +# centos7 +docker pull internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 ``` + +#### 容器启动 +对于使用 dockerfile 构建或拉取的本地标准镜像,使用如下命令启动并进入容器: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:torch1.13.1-cuda11.7.1-flashatten1.0.5-centos7 bash +``` +容器内默认目录即 `/InternLM`,根据[使用文档](./usage.md)即可启动训练。 diff --git a/docker.Makefile b/docker.Makefile new file mode 100644 index 0000000..21ce55a --- /dev/null +++ b/docker.Makefile @@ -0,0 +1,107 @@ +DOCKER_REGISTRY ?= docker.io +DOCKER_ORG ?= my +DOCKER_IMAGE ?= internlm +DOCKER_FULL_NAME = $(DOCKER_REGISTRY)/$(DOCKER_ORG)/$(DOCKER_IMAGE) + +CUDA_VERSION = 11.7.1 +GCC_VERSION = 10.2.0 + +CUDNN_VERSION = 8 +BASE_RUNTIME = +# ubuntu20.04 centos7 +BASE_OS = centos7 +BASE_DEVEL = nvidia/cuda:$(CUDA_VERSION)-cudnn$(CUDNN_VERSION)-devel-${BASE_OS} +# The conda channel to use to install cudatoolkit +CUDA_CHANNEL = nvidia +# The conda channel to use to install pytorch / torchvision +INSTALL_CHANNEL ?= pytorch + +PYTHON_VERSION ?= 3.10 +PYTORCH_VERSION ?= 1.13.1 +TORCHVISION_VERSION ?= 0.14.1 +TORCHAUDIO_VERSION ?= 0.13.1 +BUILD_PROGRESS ?= auto +TRITON_VERSION ?= +GMP_VERSION ?= 6.2.1 +MPFR_VERSION ?= 4.1.0 +MPC_VERSION ?= 1.2.1 +GCC_VERSION ?= 10.2.0 +HTTPS_PROXY_I ?= +HTTP_PROXY_I ?= +FLASH_ATTEN_VERSION ?= 1.0.5 +FLASH_ATTEN_TAG ?= 
v${FLASH_ATTEN_VERSION} + +BUILD_ARGS = --build-arg BASE_IMAGE=$(BASE_IMAGE) \ + --build-arg PYTHON_VERSION=$(PYTHON_VERSION) \ + --build-arg CUDA_VERSION=$(CUDA_VERSION) \ + --build-arg CUDA_CHANNEL=$(CUDA_CHANNEL) \ + --build-arg PYTORCH_VERSION=$(PYTORCH_VERSION) \ + --build-arg TORCHVISION_VERSION=$(TORCHVISION_VERSION) \ + --build-arg TORCHAUDIO_VERSION=$(TORCHAUDIO_VERSION) \ + --build-arg INSTALL_CHANNEL=$(INSTALL_CHANNEL) \ + --build-arg TRITON_VERSION=$(TRITON_VERSION) \ + --build-arg GMP_VERSION=$(GMP_VERSION) \ + --build-arg MPFR_VERSION=$(MPFR_VERSION) \ + --build-arg MPC_VERSION=$(MPC_VERSION) \ + --build-arg GCC_VERSION=$(GCC_VERSION) \ + --build-arg https_proxy=$(HTTPS_PROXY_I) \ + --build-arg http_proxy=$(HTTP_PROXY_I) \ + --build-arg FLASH_ATTEN_TAG=$(FLASH_ATTEN_TAG) + +EXTRA_DOCKER_BUILD_FLAGS ?= + +BUILD ?= build +# Intentionally left blank +PLATFORMS_FLAG ?= +PUSH_FLAG ?= +USE_BUILDX ?=1 +BUILD_PLATFORMS ?= +WITH_PUSH ?= false +BUILD_TYPE ?= intrenlm-dev + +# Setup buildx flags +ifneq ("$(USE_BUILDX)","") +BUILD = buildx build +ifneq ("$(BUILD_PLATFORMS)","") +PLATFORMS_FLAG = --platform="$(BUILD_PLATFORMS)" +endif +endif +# endif + +# # Only set platforms flags if using buildx +# ifeq ("$(WITH_PUSH)","true") +# PUSH_FLAG = --push +# endif +# endif + +ifeq ($(findstring centos,$(BASE_OS)),centos) + DOCKERFILE_PATH ?= ./docker/Dockerfile-centos +else + DOCKERFILE_PATH ?= ./docker/Dockerfile-ubuntu +endif + +#use -f to specify dockerfile +DOCKER_BUILD = DOCKER_BUILDKIT=1 \ + docker $(BUILD) \ + --progress=$(BUILD_PROGRESS) \ + $(EXTRA_DOCKER_BUILD_FLAGS) \ + $(PLATFORMS_FLAG) \ + $(PUSH_FLAG) \ + -f $(DOCKERFILE_PATH) \ + -t $(DOCKER_FULL_NAME):$(DOCKER_TAG) \ + $(BUILD_ARGS) . + + # --target $(BUILD_TYPE) + +.PHONY: all +all: devel-image + +.PHONY: devel-image +devel-image: BASE_IMAGE := $(BASE_DEVEL) +devel-image: DOCKER_TAG := torch${PYTORCH_VERSION}-cuda${CUDA_VERSION}-flashatten${FLASH_ATTEN_VERSION}-${BASE_OS} +devel-image: + $(DOCKER_BUILD) + +.PHONY: clean +clean: + -docker rmi -f $(shell docker images -q $(DOCKER_FULL_NAME)) \ No newline at end of file diff --git a/docker/Dockerfile-centos b/docker/Dockerfile-centos new file mode 100644 index 0000000..917d28f --- /dev/null +++ b/docker/Dockerfile-centos @@ -0,0 +1,131 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on centos +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN yum install deltarpm -y && yum update -y \ + && yum install -y \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + bzip2 \ + gcc \ + gcc-c++ \ + file \ + texinfo \ + which + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + 
/opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && git clone https://github.com/ninja-build/ninja.git \ + && cd ninja \ + && git checkout release \ + && ./configure.py --bootstrap \ + && mv ./ninja /usr/bin \ + && cd .. + +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${MPFR_HOME}/lib:$LD_LIBRARY_PATH + +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-threads=posix --disable-checking --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +RUN git submodule update --init --recursive \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/torch.txt \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/runtime.txt \ + && cd /InternLM/third_party/flash-attention \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . 
\ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/docker/Dockerfile-ubuntu b/docker/Dockerfile-ubuntu new file mode 100644 index 0000000..e73421a --- /dev/null +++ b/docker/Dockerfile-ubuntu @@ -0,0 +1,112 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on ubuntu +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + ninja-build + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. 
\ + && wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxJf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-checking=release --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +RUN git submodule update --init --recursive \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/torch.txt \ + && /opt/conda/bin/pip --no-cache-dir install -r requirements/runtime.txt \ + && cd /InternLM/third_party/flash-attention \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . \ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/Dockerfile-centos b/experiment/Dockerfile-centos new file mode 100644 index 0000000..31ffc19 --- /dev/null +++ b/experiment/Dockerfile-centos @@ -0,0 +1,161 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on centos +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN yum install deltarpm -y && yum update -y \ + && yum install -y \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + bzip2 \ + gcc \ + gcc-c++ \ + file \ + texinfo \ + which + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# 
Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && git clone https://github.com/ninja-build/ninja.git \ + && cd ninja \ + && git checkout release \ + && ./configure.py --bootstrap \ + && mv ./ninja /usr/bin \ + && cd .. + +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${MPFR_HOME}/lib:$LD_LIBRARY_PATH + +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-threads=posix --disable-checking --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG PYTORCH_VERSION +ARG TORCHVISION_VERSION +ARG TORCHAUDIO_VERSION + +RUN /opt/conda/bin/pip --no-cache-dir install \ + transformers==4.29.2 \ + sentencepiece \ + numpy \ + tqdm \ + psutil \ + packaging \ + pre-commit \ + ninja \ + gputil \ + pytest \ + packaging \ + boto3 \ + botocore \ + torch-scatter \ + pyecharts \ + -f https://data.pyg.org/whl/torch-${PYTORCH_VERSION}+cu117.html \ + && /opt/conda/bin/pip --no-cache-dir install \ + --extra-index-url https://download.pytorch.org/whl/cu117 \ + torch==${PYTORCH_VERSION}+cu117 \ + torchvision==${TORCHVISION_VERSION}+cu117 \ + torchaudio==${TORCHAUDIO_VERSION} + +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +ARG FLASH_ATTEN_TAG + +RUN git submodule update --init --recursive \ + && cd /InternLM/third_party/flash-attention \ + && git checkout ${FLASH_ATTEN_TAG} \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . 
\ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . \ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/Dockerfile-ubuntu b/experiment/Dockerfile-ubuntu new file mode 100644 index 0000000..230a3b5 --- /dev/null +++ b/experiment/Dockerfile-ubuntu @@ -0,0 +1,142 @@ +ARG BASE_IMAGE +ARG https_proxy +ARG http_proxy + +############################################################################## +# Install the basic environment on ubuntu +############################################################################## +FROM ${BASE_IMAGE} as base +ARG https_proxy +ARG http_proxy +RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + build-essential \ + ca-certificates \ + cmake \ + curl \ + git \ + wget \ + tar \ + m4 \ + ninja-build + + +############################################################################## +# Install the conda environment +############################################################################## +FROM base as conda +ARG PYTHON_VERSION=3.10 +ARG TARGETPLATFORM +ARG https_proxy +ARG http_proxy +RUN case ${TARGETPLATFORM} in \ + "linux/arm64") MINICONDA_ARCH=aarch64 ;; \ + *) MINICONDA_ARCH=x86_64 ;; \ + esac && \ + curl -fsSL -v -o ~/miniconda.sh -O "https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-${MINICONDA_ARCH}.sh" + +RUN chmod +x ~/miniconda.sh && \ + bash ~/miniconda.sh -b -p /opt/conda && \ + rm ~/miniconda.sh && \ + /opt/conda/bin/conda install -y python=${PYTHON_VERSION} cmake conda-build pyyaml numpy ipython && \ + /opt/conda/bin/conda clean -ya + + +############################################################################## +# Install environment dependencies +############################################################################## +FROM conda as dep +WORKDIR /dep +ARG https_proxy +ARG http_proxy +ARG GCC_VERSION +ARG GMP_VERSION +ARG MPFR_VERSION +ARG MPC_VERSION +RUN wget https://ftp.gnu.org/gnu/gmp/gmp-${GMP_VERSION}.tar.bz2 \ + && tar -vxf gmp-${GMP_VERSION}.tar.bz2 \ + && cd gmp-${GMP_VERSION}/ \ + && ./configure --prefix=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget https://ftp.gnu.org/gnu/mpfr/mpfr-${MPFR_VERSION}.tar.gz \ + && tar -vxf mpfr-${MPFR_VERSION}.tar.gz \ + && cd mpfr-${MPFR_VERSION}/ \ + && ./configure --prefix=/usr/local/mpfr-${MPFR_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} \ + && make -j64 && make install \ + && cd .. \ + && wget http://www.multiprecision.org/downloads/mpc-${MPC_VERSION}.tar.gz \ + && tar -vxf mpc-${MPC_VERSION}.tar.gz \ + && cd mpc-${MPC_VERSION}/ \ + && ./configure --prefix=/usr/local/mpc-${MPC_VERSION} --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} \ + && make -j64 && make install \ + && cd .. 
\ + && wget https://ftp.gnu.org/gnu/gcc/gcc-${GCC_VERSION}/gcc-${GCC_VERSION}.tar.xz \ + && tar -vxJf gcc-${GCC_VERSION}.tar.xz \ + && mkdir build \ + && cd build/ \ + && ../gcc-${GCC_VERSION}/configure --prefix=/usr/local/gcc-${GCC_VERSION}/ --enable-checking=release --enable-languages=c,c++ --disable-multilib \ + --with-gmp=/usr/local/gmp-${GMP_VERSION} --with-mpfr=/usr/local/mpfr-${MPFR_VERSION} --with-mpc=/usr/local/mpc-${MPC_VERSION} \ + && make -j64 && make install + +ENV GCC_HOME=/usr/local/gcc-${GCC_VERSION} +ENV MPFR_HOME=/usr/local/mpfr-${MPFR_VERSION} +ENV LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +ENV PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +ENV CC=${GCC_HOME}/bin/gcc +ENV CXX=${GCC_HOME}/bin/c++ + + +############################################################################## +# Install InternLM development environment, including flash-attention and apex +############################################################################## +FROM dep as intrenlm-dev +COPY . /InternLM +WORKDIR /InternLM +ARG https_proxy +ARG http_proxy +ARG PYTORCH_VERSION +ARG TORCHVISION_VERSION +ARG TORCHAUDIO_VERSION + +RUN /opt/conda/bin/pip --no-cache-dir install \ + transformers==4.29.2 \ + sentencepiece \ + numpy \ + tqdm \ + psutil \ + packaging \ + pre-commit \ + ninja \ + gputil \ + pytest \ + packaging \ + boto3 \ + botocore \ + torch-scatter \ + pyecharts \ + -f https://data.pyg.org/whl/torch-${PYTORCH_VERSION}+cu117.html \ + && /opt/conda/bin/pip --no-cache-dir install \ + --extra-index-url https://download.pytorch.org/whl/cu117 \ + torch==${PYTORCH_VERSION}+cu117 \ + torchvision==${TORCHVISION_VERSION}+cu117 \ + torchaudio==${TORCHAUDIO_VERSION} + +ARG https_proxy +ARG http_proxy +ARG TORCH_CUDA_ARCH_LIST="8.0;8.6+PTX" +ARG FLASH_ATTEN_TAG + +RUN git submodule update --init --recursive \ + && cd /InternLM/third_party/flash-attention \ + && git checkout ${FLASH_ATTEN_TAG} \ + && /opt/conda/bin/python setup.py install \ + && cd ./csrc \ + && cd fused_dense_lib && /opt/conda/bin/pip install -v . \ + && cd ../xentropy && /opt/conda/bin/pip install -v . \ + && cd ../rotary && /opt/conda/bin/pip install -v . \ + && cd ../layer_norm && /opt/conda/bin/pip install -v . 
\ + && cd ../../../../ \ + && cd ./third_party/apex \ + && /opt/conda/bin/pip --no-cache-dir install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ \ + && /opt/conda/bin/pip cache purge \ + && rm -rf ~/.cache/pip \ No newline at end of file diff --git a/experiment/README-CN.md b/experiment/README-CN.md new file mode 100644 index 0000000..1f96cc7 --- /dev/null +++ b/experiment/README-CN.md @@ -0,0 +1,25 @@ +## 实验性环境镜像 +本模块用于测试新版本环境,默认测试新环境 torch=2.0.1,flash-attention=2.1.0。新环境可能具有不稳定性,标准环境安装请参考:[安装文档](../doc/install.md) + +### 镜像构建及拉取 +构建镜像时请于 InternLM 根目录下执行 docker.Makefile,该文件与标准环境镜像共用,所使用的 Dockerfile 位于 experiment 目录下。也可直接从 https://hub.docker.com/r/internlm/internlm 拉取镜像,命令如下: +```bash +# 构建镜像 +# ubuntu20.04 +make -f docker.Makefile BASE_OS=ubuntu20.04 DOCKERFILE_PATH=./experiment/Dockerfile-ubuntu PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 +# centos7 +make -f docker.Makefile BASE_OS=centos7 DOCKERFILE_PATH=./experiment/Dockerfile-centos PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 + +# 拉取镜像 +# ubuntu20.04 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-ubuntu20.04 +# centos7 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 +``` + +### 容器启动 +对于使用 dockerfile 构建或拉取的本地标准镜像,使用如下命令启动并进入容器: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 bash +``` +容器内默认目录即 `/InternLM`,根据[使用文档](../doc/usage.md)即可启动训练。 \ No newline at end of file diff --git a/experiment/README-EN.md b/experiment/README-EN.md new file mode 100644 index 0000000..f9bae2b --- /dev/null +++ b/experiment/README-EN.md @@ -0,0 +1,25 @@ +## Environment Image for experiment +This module is used to test the new version environment, the default test new environment is torch=2.0.1, flash-attention=2.1.0. The new environment may be unstable, for the standard environment installation please refer to: [installation guide](../doc/en/install.md) + +### Build and Pull Image +When building the image, please make docker.Makefile in the InternLM root directory. This Makefile is shared with the standard environment image, and the Dockerfile used is located in the experiment directory. 
You can also pull the image directly from https://hub.docker.com/r/internlm/internlm, the command is as follows: +```bash +# Build Image +# ubuntu20.04 +make -f docker.Makefile BASE_OS=ubuntu20.04 DOCKERFILE_PATH=./experiment/Dockerfile-ubuntu PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 +# centos7 +make -f docker.Makefile BASE_OS=centos7 DOCKERFILE_PATH=./experiment/Dockerfile-centos PYTORCH_VERSION=2.0.1 TORCHVISION_VERSION=0.15.2 TORCHAUDIO_VERSION=2.0.2 FLASH_ATTEN_VERSION=2.1.0 + +# Pull Image +# ubuntu20.04 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-ubuntu20.04 +# centos7 +docker pull internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 +``` + +### Run Container +For the local standard image built with dockerfile or pulled, use the following command to run and enter the container: +```bash +docker run --gpus all -it -m 500g --cap-add=SYS_PTRACE --cap-add=IPC_LOCK --shm-size 20g --network=host --name myinternlm internlm/internlm:experiment-torch2.0.1-flashatten2.1.0-centos7 bash +``` +The default directory in the container is `/InternLM`, please start training according to the [Usage](../doc/en/usage.md). \ No newline at end of file diff --git a/internlm/core/trainer.py b/internlm/core/trainer.py index a027fed..2839ad9 100644 --- a/internlm/core/trainer.py +++ b/internlm/core/trainer.py @@ -78,8 +78,9 @@ class TrainState: self.step_count = other_stuffs.get("step_count", other_stuffs["batch_count"]) + 1 # track the actual updates of sampler when using weighted sampling - self.batch_sampler = train_dl.batch_sampler.copy() - self.batch_sampler_iter = iter(self.batch_sampler) + if hasattr(self, "batch_sampler"): + self.batch_sampler = train_dl.batch_sampler.copy() + self.batch_sampler_iter = iter(self.batch_sampler) # resume tensorboard from older tensorboard_folder self.resume_tb_folder = other_stuffs.get("tensorboard_folder", None) diff --git a/internlm/initialize/initialize_tensor.py b/internlm/initialize/initialize_tensor.py index 2580ca0..b317f26 100644 --- a/internlm/initialize/initialize_tensor.py +++ b/internlm/initialize/initialize_tensor.py @@ -3,16 +3,15 @@ import math -import torch from torch import Tensor, nn -def scaled_init_method_normal(sigma, num_layers): +def scaled_init_method_normal(sigma: float = 1.0, num_layers: int = 1): """Init method based on N(0, sigma/sqrt(2*num_layers).""" std = sigma / math.sqrt(2.0 * num_layers) def init_(tensor): - return torch.nn.init.normal_(tensor, mean=0.0, std=std) + return nn.init.normal_(tensor, mean=0.0, std=std) return init_ @@ -32,3 +31,33 @@ def normal_(mean: float = 0.0, std: float = 1.0): return nn.init.normal_(tensor, mean, std) return initializer + + +def scaled_init_method_uniform(sigma: float = 1.0, num_layers: int = 1): + """Init method based on p(x)=Uniform(-a, a) where std(x)=sigma/sqrt(2*num_layers).""" + std = sigma / math.sqrt(2.0 * num_layers) + a = math.sqrt(3.0 * std) + + def init_(tensor): + return nn.init.uniform_(tensor, -a, a) + + return init_ + + +def uniform_(mean: float = 0.0, std: float = 1.0): + r"""Return the initializer filling the input Tensor with values drawn from the uniform distribution + + .. math:: + \mathcal{U}(mean-a, mean+a), where a satisfies \mathcal{U}_{std}=std. + + Args: + mean (float): the mean of the uniform distribution. Defaults 0.0. + std (float): the standard deviation of the uniform distribution. Defaults 1.0. 
+ """ + + a = math.sqrt(3.0 * std) + + def initializer(tensor: Tensor): + return nn.init.uniform_(tensor, mean - a, mean + a) + + return initializer diff --git a/internlm/model/metrics.py b/internlm/model/metrics.py index 1749aa2..24ce592 100644 --- a/internlm/model/metrics.py +++ b/internlm/model/metrics.py @@ -176,7 +176,7 @@ class AccPerplex: res.update(ds_acc) res.update(ds_tokens) - loss_res = self.loss_with_type_id.get_metric() + loss_res = self.loss_with_type_id.get_metric(reset) res.update(loss_res) return res diff --git a/internlm/utils/evaluation.py b/internlm/utils/evaluation.py index f1b2a20..872ef87 100644 --- a/internlm/utils/evaluation.py +++ b/internlm/utils/evaluation.py @@ -76,7 +76,7 @@ def evaluate_on_val_dls( data_cfg = gpc.config.data for val_name, val_dl in val_dls.items(): - if len(val_dl) == 0 and verbose and not streaming: + if not streaming and len(val_dl) == 0 and verbose: logger.info(f"Validation dataset: {val_name} is empty") continue diff --git a/requirements/runtime.txt b/requirements/runtime.txt index c0b345f..f46d7ad 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -13,4 +13,4 @@ boto3 botocore torch-scatter pyecharts --f https://data.pyg.org/whl/torch-1.13.0+cu117.html \ No newline at end of file +-f https://data.pyg.org/whl/torch-1.13.1+cu117.html \ No newline at end of file From 620472f15fb6bf6eaac9e4436fd43608f21acf15 Mon Sep 17 00:00:00 2001 From: Sun Peng Date: Fri, 1 Sep 2023 11:00:11 +0800 Subject: [PATCH 12/34] [Dev2Main] 20130901 (#261) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(utils/writer.py): support tensorboard writer (#63) * feat(utils/writer.py): support tensorboard writer * feat(utils/writer.py): add class comment --------- Co-authored-by: 黄婷 * [Develop] Pull Main Branch (#121) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. 
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) --------- Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> * feat(core/scheduler): support pipeline parallel (#98) * feat(utils/writer.py): support tensorboard writer * feat(utils/writer.py): add class comment * feat(core): support pipeline parallel * fix(core): fix demo running error * feat(solver/optimizer): add pp zero optimizer * fix(solver/optimizer): fix word spelling error * feat(core/scheduler): add new dir scheduler in core/ * fix(core): fix ci lint error * feat(solver/optimizer): merge pp and nopp optimizer * doc(usage.md): update usage doc * feat(core/scheduler): support post func * feat(core/scheduler): add dtype para in pp sche and update func get_tensor_shape * feat(core/scheduler): add _load_micro_batch in base scheduler * feat(core/scheduler): support optimizer overlap communication in pp scheduler * feat(core/scheduler): delete data process func code * feat(core/trainer): schedule pre processing for all schedule --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * refactor(rotaryEmbedding): refactor forward (#120) * use fp16 in instruction (#80) * delete torch_dtype of README's example code (#100) * refactor the forward for rotary embedding --------- Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> * feat(model/metrics.py): support calculating accuracy and perplexity m… (#91) * feat(model/metrics.py): support calculating accuracy and perplexity metrics * fix(model/metrics.py): fix import error * feat(train.py): minor update --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * fix(optimizer/util.py) change inf defination * [Dev] Pull Main (#139) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more 
options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. * [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) * docs(install.md): update dependency package transformers version to >= 4.28.0 (#124) Co-authored-by: 黄婷 * docs(LICENSE): add license (#125) * add license of colossalai and flash-attn * fix lint * modify the name * fix AutoModel map in convert2hf.py (#116) * variables are not printly as expect (#114) * feat(solver): fix code to adapt to torch2.0 and provide docker images (#128) * feat(solver): fix code to adapt to torch2.0 * docs(install.md): publish internlm environment image * docs(install.md): update dependency packages version * docs(install.md): update default image --------- Co-authored-by: 黄婷 * add demo test (#132) Co-authored-by: qa-caif-cicd * fix web_demo cache accelerate (#133) * fix(hybrid_zero_optim.py): delete math import * Update embedding.py --------- Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: huangting4201 <1538303371@qq.com> Co-authored-by: 黄婷 Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> * style(solver/optimizer/utils.py): fix lint error (#147) Co-authored-by: huangting.p * feat(*): support not-flash-attn for pp and no-pp (#145) * support not flash attention for no-pp * support pipeline * modify the config * refactor the code * refactor the code * remove some unnecessary code * fix(initialize/launch.py): set default value for use_flash_attn (#158) * add default for use_flash_attn * fix lint * feat(utils/logger.py): support uniscale logger (#152) * style(internlm): fix lint error * feat(utils/logger.py): support uniscale logger * fix(utils/logger.py): fix import circular error * feat(train.py): support dashboard metric panel and fix ci train config * fix(ci_scripts/train/slurm_train.sh): fix ci train error * fix(ci_scripts/train/torchrun.sh): fix ci train error * fix(ci_scripts/train): restore ci update * fix(config.json): delete alert webhook * feat(train.py): optimize func init logger * feat(config.json): delete config.json --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p * feat(utils/evaluation.py): support evaluate (#154) * style(internlm): fix lint error * feat(utils/logger.py): support uniscale logger * fix(utils/logger.py): fix import circular error * feat(train.py): support dashboard metric panel and fix ci train config * 
fix(ci_scripts/train/slurm_train.sh): fix ci train error * fix(ci_scripts/train/torchrun.sh): fix ci train error * feat(utils/evaluation.py): support evaluate on validation dataset * fix(utils/evaluation.py): fix demo error * fix(ci_scripts/train/ci_7B_sft.py): fix ci train error * feat(initialize/launch.py): set default value for valid_bsz and valid_every * fix(ci_scripts/train): restore ci update * docs(configs/7B_sft.py): update comment for config * fix(config.json): delete config.json * fix evaluation bug in scheduler when use_flash_attn=False * feat(scheduler/no_pipeline_scheduler.py): support micro_bsz>1 in no pp * modify the jugement in pp and no-pp scheduler * modify the data_process_func in evaluation * fix bugs when use_flash_attn=False * rename symbol * feat(configs/7B_sft.py): change para valid_bsz to valid_micro_num * feat(scheduler/no_pipeline_scheduler.py): update para set _grad_accum_batch_size --------- Co-authored-by: 黄婷 Co-authored-by: huangting.p Co-authored-by: yingtongxiong <974106207@qq.com> * feat(*): support no apex (#166) * support no-apex * add default for use_apex * fix lint * modify the RMSNormTorch * remove some comments * remove use_apex parameter * remove some unnecessary code * refactor(*): refactor the code with no-apex (#170) * support no-apex * add default for use_apex * fix lint * modify the RMSNormTorch * remove some comments * remove use_apex parameter * remove some unnecessary code * optimize the code including import * remove the import RMSNorm * remove warnings * refactor(scheduler): rewrite pipeline scheduler (#138) * refactor(scheduler): rewrite pipeline scheduler * fix(*): fix pipeline scheduler bugs * fix(*): fix merge bug * feat(*): update codes with todo tag * feat(*): add comments * feat(internlm/core/scheduler): update recv_prev/next logic * feat(utils/evaluation.py): update sche metric hook for valid --------- Co-authored-by: huangting.p * feat(*): support fp32 training (#155) * support float32 training * fix lint * add adaptation in model/utils.py * remove some unnecessary code * fix lint * feat(optim): add support for fp32 zero * Revert "Merge pull request #2 from SolenoidWGT/fp32_zero" This reverts commit 53fc50b0e52f12466e8dc8ec14c5e22b217537c8, reversing changes made to 40f24d0a73fff5c083e11c18d4a07ad16aaabab3. 
revert commit * merge develop * Update utils.py * support fp32 in zero optimizer * modify the dtype --------- Co-authored-by: wangguoteng.p * feat(*): support sequence_parallel (#180) * support sequence_parallel for no pipeline * sequence_parallel does not support no-flash-attn * support sequence parallel for pipeline * add memory profiler * Update 13B.py * add memory profiler * fix evaluation bug * remove some unnecessary code * remove some unnecessary code * Update parallel_context.py * modify the config * remove memory profiler * modify the config * support selective dropout * feat(monitor): support monitor and alert (#175) * feat(monitor): support monitor and alert * feat(monitor.py): fix demo error * feat(monitor.py): move cmd monitor args to config file * feat(hybrid_zero_optim.py): if overflow occurs send alert msg * feat(monitor.py): remove alert msg filter * feat(monitor.py): optimize class MonitorTracker * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(monitor.py): optimize code * feat(train.py): update print to log * style(ci): fix lint error * fix(utils/evaluation.py): remove useless code * fix(model/modeling_internlm.py): fix lint error --------- Co-authored-by: huangting4201 * feat(ckpt): add async upload and ckpt snapshot (#161) * use fp16 in instruction (#80) * delete torch_dtype of README's example code (#100) * feat(ckpt): support async ckpt upload and ckpt snapshot --------- Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: wangguoteng.p * feat(ckpt): add auto ckpt load and singal quit (#189) Co-authored-by: wangguoteng.p * Revert "feat(ckpt): add auto ckpt load and singal quit (#189)" (#192) This reverts commit a45a91bb843cf0b10b8b014a6ef35e695871f91b. 
* refactor(solver/optimizer): improve optimizer memory (#193) * refactor(solver/optimizer): improve optimizer memory * feat(data): remove useless dataset type ids map * Feat/optimizer (#194) * feat(optimier.py): reduce memory footprint and avoid _check_overflow call * feat(optimier.py): reduce memory footprint and avoid _check_overflow call * feat(optimizer.py): overlap compute norm with allreduce * update var and function name * update function compute norm (#197) Co-authored-by: ChenQiaoling00 * feat(optimizer/hybrid_zero_optim.py): overlap gradients last bucket allreduce and compute norm (#196) * support gradients allreduce and compute norm overlap * fix para set error * remove timer cal_norm for testing * feat(optimizer/hybrid_zero_optim.py): support group global norm * format(lint): fix lint error * feat(optimizer/store.py): update code based on comment --------- Co-authored-by: ChenQiaoling00 Co-authored-by: huangting4201 <1538303371@qq.com> * fix(ci): fix ci train error (#199) * fix/ci train error (#200) * fix(ci): fix ci train error * fix(ci): fix ci train error * fix(ci): fix ci train error * fix(train.py): fix scheduler metric hook skip error (#204) * Merge main to develop (#203) * fix/fix_submodule_err (#61) * fix/fix_submodule_err --------- Co-authored-by: ChenQiaoling00 * fix issue templates (#65) * fix(tokenizer): refactor tokenizer and update usage in readme (#51) * update tokenizer example * fix(readme, requirements): fix typo at Chinese readme and select a lower version of transformers (#73) * fix a typo in readme * in order to find InternLMTokenizer, select a lower version of Transformers --------- Co-authored-by: gouhchangjiang * [Doc] Add wechat and discord link in readme (#78) * Doc:add wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * Doc:update wechat and discord link * [Docs]: add Japanese README (#43) * Add Japanese README * Update README-ja-JP.md replace message * Update README-ja-JP.md * add repetition_penalty in GenerationConfig in web_demo.py (#48) Co-authored-by: YWMditto <862779238@qq.com> * use fp16 in instruction (#80) * [Enchancement] add more options for issue template (#77) * [Enchancement] add more options for issue template * update qustion icon * fix link * Use tempfile for convert2hf.py (#23) Fix https://github.com/InternLM/InternLM/issues/50 * delete torch_dtype of README's example code (#100) * set the value of repetition_penalty to 1.0 to avoid random outputs (#99) * Update web_demo.py (#97) Remove meaningless log. 
* [Fix]Fix wrong string cutoff in the script for sft text tokenizing (#106) * docs(install.md): update dependency package transformers version to >= 4.28.0 (#124) Co-authored-by: 黄婷 * docs(LICENSE): add license (#125) * add license of colossalai and flash-attn * fix lint * modify the name * fix AutoModel map in convert2hf.py (#116) * variables are not printly as expect (#114) * feat(solver): fix code to adapt to torch2.0 and provide docker images (#128) * feat(solver): fix code to adapt to torch2.0 * docs(install.md): publish internlm environment image * docs(install.md): update dependency packages version * docs(install.md): update default image --------- Co-authored-by: 黄婷 * add demo test (#132) Co-authored-by: qa-caif-cicd * fix web_demo cache accelerate (#133) * Doc: add twitter link (#141) * Feat add checkpoint fraction (#151) * feat(config): add checkpoint_fraction into config * feat: remove checkpoint_fraction from configs/7B_sft.py --------- Co-authored-by: wangguoteng.p * [Doc] update deployment guide to keep consistency with lmdeploy (#136) * update deployment guide * fix error * use llm partition (#159) Co-authored-by: qa-caif-cicd * test(ci_scripts): clean test data after test, remove unnecessary global variables, and other optimizations (#165) * test: optimization of ci scripts(variables, test data cleaning, etc). * chore(workflows): disable ci job on push. * fix: update partition * test(ci_scripts): add install requirements automaticlly,trigger event about lint check and other optimizations (#174) * add pull_request in lint check * use default variables in ci_scripts * fix format * check and install requirements automaticlly * fix format --------- Co-authored-by: qa-caif-cicd * feat(profiling): add a simple memory profiler (#89) * feat(profiling): add simple memory profiler * feat(profiling): add profiling argument * feat(CI_workflow): Add PR & Issue auto remove workflow (#184) * feat(ci_workflow): Add PR & Issue auto remove workflow Add a workflow for stale PR & Issue auto remove - pr & issue well be labeled as stale for inactive in 7 days - staled PR & Issue well be remove in 7 days - run this workflow every day on 1:30 a.m. 
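The stale.yml added by that workflow commit is not reproduced in this patch series; purely as a hedged sketch, a schedule matching the described policy (daily at 1:30 a.m., 7 days to stale, 7 more to close) might look like the following with the standard actions/stale action:
```yaml
# Sketch only; the repository's actual .github/workflows/stale.yml may differ.
name: stale
on:
  schedule:
    - cron: "30 1 * * *"       # run every day at 01:30
jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v8
        with:
          days-before-stale: 7   # label PRs/issues stale after 7 days of inactivity
          days-before-close: 7   # close them 7 days after being labeled
```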
* Update stale.yml * feat(bot): Create .owners.yml for Auto Assign (#176) * Create .owners.yml: for issue/pr assign automatically * Update .owners.yml * Update .owners.yml fix typo * [feat]: add pal reasoning script (#163) * [Feat] Add PAL inference script * Update README.md * Update tools/README.md Co-authored-by: BigDong * Update tools/pal_inference.py Co-authored-by: BigDong * Update pal script * Update README.md * restore .ore-commit-config.yaml * Update tools/README.md Co-authored-by: BigDong * Update tools/README.md Co-authored-by: BigDong * Update pal inference script * Update READMD.md * Update internlm/utils/interface.py Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> * Update pal script * Update pal script * Update script * Add docstring * Update format * Update script * Update script * Update script --------- Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> * test(ci_scripts): add timeout settings and clean work after the slurm job (#185) * restore pr test on develop branch * add mask * add post action to cancel slurm job * remove readonly attribute on job log * add debug info * debug job log * try stdin * use stdin * set default value avoid error * try setting readonly on job log * performance echo * remove debug info * use squeue to check slurm job status * restore the lossed parm * litmit retry times * use exclusive to avoid port already in use * optimize loop body * remove partition * add {} for variables * set env variable for slurm partition --------- Co-authored-by: qa-caif-cicd * refactor(tools): move interface.py and import it to web_demo (#195) * move interface.py and import it to web_demo * typo * fix(ci): fix lint error * fix(ci): fix lint error --------- Co-authored-by: Sun Peng Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: 黄婷 Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Co-authored-by: wangguoteng.p Co-authored-by: lvhan028 Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com> Co-authored-by: cx <759046501@qq.com> Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com> Co-authored-by: del-zhenwu Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com> Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> Co-authored-by: huangting4201 * fix(pipeline_scheduler.py): fix tensor shape err and comm block (#210) * feat(train.py): support torch profiler (#201) * feat(train.py): support torch profiling * feat(train.py): optimize initialize_llm_profile * feat(train.py): profiling with 
tp0 and dp0 * move sequence parallel context manager to evalation func * fix lint * move the process for type_ids to load_new_batch * fix lint --------- Co-authored-by: yingtongxiong <974106207@qq.com> * feat(ckpt): add auto ckpt load and singal quit (#216) Co-authored-by: wangguoteng.p * feat(memory_profiler): improve memory profiler (#217) * Feat/overlap_bcast_forward (#218) * feat/support bcast forward overlao * feat/optimize the bcast call * feat/optimize the bcast call * feat/optimize the bcast call * fix lint * fix lint * fix lint * fix lint * add torch.cuda.synchronize in save_checkpoint --------- Co-authored-by: sunpeng * fix(*): move sequence_parallel to parallel config (#224) * move sequence_parallel to parallel config * set the sequece_parallel default value is False * fix lint * fix lint * fix lint * Feat/example training internlm (#212) * feat(train/training_internlm.py): move common init funcs to internlm/train * feat(train/training_internlm.py): update some public funcs * feat(train/training_internlm.py): update some public funcs * feat(evaluation.py): adapt evaluate to streaming dataset * feat(train/training_internlm.py): minor update based on comments * fix(training_internlm.py): set train dataloader persistent_workers true only when num_worker>0 * fix(training_internlm.py): fix demo error * feat(data/utils.py): add new dataset type code for streaming dataset (#225) * test(model): support fp32 with flash_attn (#223) * support tf32 with flash * move autocast to attention * fix lint * fix lint * fix lint * fix lint * fix some bugs in model * modify the convert dtype * fix(pipeline): modify the sequence_parallel in pipeline (#227) * move sequence_parallel to parallel config * set the sequece_parallel default value is False * fix lint * fix lint * fix lint * modify the sequence_parallel in pp * feat(init): add skip args check flag and add zero overlap flag (#222) * feat(init): add skip args check flag * fix(optim): add param overlap enable flag * fix(ci): fix train error (#228) Co-authored-by: huangting4201 * fix(writer): fix tensorboard resume bug (#229) * fix(train.py): fix overflow grad norm error (#230) * feat(ckpt): add train config into ckpt (#231) * docs(doc/code-docs): support readthedocs (#245) * feat(doc/code-docs): add code-docs for readthedocs * feat(doc/code-docs): add .readthedocs.yaml configuration file * feat(doc/code-docs): update .readthedocs.yaml configuration file * feat(doc/code-docs): update .readthedocs.yaml configuration file * feat(doc/code-docs): update .readthedocs.yaml configuration file * feat(doc/code-docs): update .readthedocs.yaml configuration file * feat(doc/code-docs): update code-docs * [Daily Pull] Merge Main to Develop 20230901 (#260) * Standard and experiment docker (#220) * feat:standard docker image * feat:standard docker image * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * feat: standard dockerfile * experiment and standard docker * experiment and standard docker * fix(core/trainer.py): fix streaming train state load error (#247) * Fix requirement (#243) * feat:standard docker image * feat:standard docker image * fix: a little problem * fix: a little problem * fix(eval): StreamingDataset does not have an __len__ method. (#251) * fix(metric): argument missing in getting loss metrics. (#256) * feat(model): implement uniform_init for tensor. (#252) * Implement uniform_init for tensor. 
* Fix functinal calling bugs: normal->uniform. * Format editting: remove unused torch importing. --------- Co-authored-by: li126com <43110891+li126com@users.noreply.github.com> Co-authored-by: huangting4201 <1538303371@qq.com> Co-authored-by: Shuo Zhang Co-authored-by: Ryan (张磊) Co-authored-by: Pryest <54388244+Pryest@users.noreply.github.com> --------- Co-authored-by: huangting4201 <1538303371@qq.com> Co-authored-by: 黄婷 Co-authored-by: ChenQiaoling00 Co-authored-by: Kai Chen Co-authored-by: Yang Gao Co-authored-by: Changjiang GOU Co-authored-by: gouhchangjiang Co-authored-by: vansin Co-authored-by: Ikko Eltociear Ashimine Co-authored-by: YWMditto <46778265+YWMditto@users.noreply.github.com> Co-authored-by: YWMditto <862779238@qq.com> Co-authored-by: WRH <12756472+wangruohui@users.noreply.github.com> Co-authored-by: liukuikun <24622904+Harold-lkk@users.noreply.github.com> Co-authored-by: x54-729 <45304952+x54-729@users.noreply.github.com> Co-authored-by: Shuo Zhang Co-authored-by: Miao Zheng <76149310+MeowZheng@users.noreply.github.com> Co-authored-by: huangting.p Co-authored-by: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Co-authored-by: Zaida Zhou <58739961+zhouzaida@users.noreply.github.com> Co-authored-by: kkscilife <126147887+kkscilife@users.noreply.github.com> Co-authored-by: qa-caif-cicd Co-authored-by: hw <45089338+MorningForest@users.noreply.github.com> Co-authored-by: yingtongxiong <974106207@qq.com> Co-authored-by: cx <759046501@qq.com> Co-authored-by: wangguoteng.p Co-authored-by: huangting4201 Co-authored-by: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Co-authored-by: lvhan028 Co-authored-by: zachtzy <141206206+zachtzy@users.noreply.github.com> Co-authored-by: Jaylin Lee <61487970+APX103@users.noreply.github.com> Co-authored-by: del-zhenwu Co-authored-by: Shaoyuan Xie <66255889+Daniel-xsy@users.noreply.github.com> Co-authored-by: BigDong Co-authored-by: Wenwei Zhang <40779233+ZwwWayne@users.noreply.github.com> Co-authored-by: li126com <43110891+li126com@users.noreply.github.com> Co-authored-by: Ryan (张磊) Co-authored-by: Pryest <54388244+Pryest@users.noreply.github.com> --- .readthedocs.yml | 28 ++++++++++++ doc/code-docs/Makefile | 20 +++++++++ doc/code-docs/make.bat | 35 +++++++++++++++ doc/code-docs/requirements.txt | 6 +++ doc/code-docs/source/checkpoint.rst | 2 + doc/code-docs/source/conf.py | 62 +++++++++++++++++++++++++ doc/code-docs/source/index.rst | 70 +++++++++++++++++++++++++++++ doc/code-docs/source/initialize.rst | 35 +++++++++++++++ doc/code-docs/source/install.md | 70 +++++++++++++++++++++++++++++ doc/code-docs/source/monitor.rst | 10 +++++ doc/code-docs/source/parallel.rst | 23 ++++++++++ doc/code-docs/source/profiler.rst | 11 +++++ doc/code-docs/source/training.rst | 2 + 13 files changed, 374 insertions(+) create mode 100644 .readthedocs.yml create mode 100644 doc/code-docs/Makefile create mode 100644 doc/code-docs/make.bat create mode 100644 doc/code-docs/requirements.txt create mode 100644 doc/code-docs/source/checkpoint.rst create mode 100644 doc/code-docs/source/conf.py create mode 100644 doc/code-docs/source/index.rst create mode 100644 doc/code-docs/source/initialize.rst create mode 100644 doc/code-docs/source/install.md create mode 100644 doc/code-docs/source/monitor.rst create mode 100644 doc/code-docs/source/parallel.rst create mode 100644 doc/code-docs/source/profiler.rst create mode 100644 doc/code-docs/source/training.rst diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..650ee88 --- 
/dev/null +++ b/.readthedocs.yml @@ -0,0 +1,28 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the OS, Python version and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: doc/code-docs/source/conf.py + fail_on_warning: false + +# Optionally build your docs in additional formats such as PDF +formats: + - pdf + +# Optional but recommended, declare the Python requirements required +# to build your documentation +# See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: doc/code-docs/requirements.txt diff --git a/doc/code-docs/Makefile b/doc/code-docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/doc/code-docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/doc/code-docs/make.bat b/doc/code-docs/make.bat new file mode 100644 index 0000000..747ffb7 --- /dev/null +++ b/doc/code-docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/doc/code-docs/requirements.txt b/doc/code-docs/requirements.txt new file mode 100644 index 0000000..9a4bb3d --- /dev/null +++ b/doc/code-docs/requirements.txt @@ -0,0 +1,6 @@ +Sphinx +sphinx-autobuild +recommonmark +sphinx_rtd_theme +sphinx_markdown_tables +autodoc_pydantic==1.9 \ No newline at end of file diff --git a/doc/code-docs/source/checkpoint.rst b/doc/code-docs/source/checkpoint.rst new file mode 100644 index 0000000..3ceed08 --- /dev/null +++ b/doc/code-docs/source/checkpoint.rst @@ -0,0 +1,2 @@ +Model Checkpointing +=================== \ No newline at end of file diff --git a/doc/code-docs/source/conf.py b/doc/code-docs/source/conf.py new file mode 100644 index 0000000..5986f06 --- /dev/null +++ b/doc/code-docs/source/conf.py @@ -0,0 +1,62 @@ +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +import os +import sys + +project = "InternLM" +copyright = "2023, InternLM Team" +author = "InternLM Team" +release = "v0.2.0" + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +extensions = [ + "recommonmark", + "sphinx_rtd_theme", + "sphinx.ext.viewcode", + "sphinx.ext.autodoc", + "sphinxcontrib.autodoc_pydantic", + "sphinx.ext.autosectionlabel", + "sphinx.ext.napoleon", +] + +pygments_style = "sphinx" + +# autodoc_pyandtic config +autodoc_pydantic_model_show_field_summary = False +autodoc_pydantic_field_signature_prefix = " " +autodoc_pydantic_model_signature_prefix = "class" +autodoc_pydantic_model_show_json = False +autodoc_pydantic_model_show_config_summary = False +autodoc_pydantic_model_show_config_member = False +autodoc_pydantic_model_show_validator_summary = False +autodoc_pydantic_model_show_validator_members = False +autodoc_pydantic_model_summary_list_order = "bysource" +autodoc_pydantic_model_member_order = "bysource" +autodoc_pydantic_field_list_validators = False + +templates_path = ["_templates"] + +exclude_patterns = [] + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = "sphinx_rtd_theme" +html_static_path = ["_static"] + +sys.path.insert(0, os.path.abspath("../../../")) + +# Prepend module names to class descriptions +add_module_names = True + +autoclass_content = "init" + +autodoc_mock_imports = ["apex", "torch"] diff --git a/doc/code-docs/source/index.rst b/doc/code-docs/source/index.rst new file mode 100644 index 0000000..3011df6 --- /dev/null +++ b/doc/code-docs/source/index.rst @@ -0,0 +1,70 @@ +.. InternLM documentation master file, created by + sphinx-quickstart on Mon Aug 28 17:33:28 2023. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +InternLM +======== + +Environment Setup +------------------- + +.. toctree:: + :maxdepth: 2 + + install + +Model Setup +------------------- + +.. toctree:: + :maxdepth: 2 + + initialize + +Training API +------------------- + +.. toctree:: + :maxdepth: 2 + + training + +Parallel Training +------------------- + +.. toctree:: + :maxdepth: 2 + + parallel + +Model Checkpointing +------------------- + +.. toctree:: + :maxdepth: 2 + + checkpoint + +Profiler +------------------- + +.. toctree:: + :maxdepth: 2 + + profiler + +Monitor +------------------- + +.. toctree:: + :maxdepth: 2 + + monitor + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` diff --git a/doc/code-docs/source/initialize.rst b/doc/code-docs/source/initialize.rst new file mode 100644 index 0000000..a638c33 --- /dev/null +++ b/doc/code-docs/source/initialize.rst @@ -0,0 +1,35 @@ +Training Setup +============== + +.. _InternLM-args: + +Argument Parsing +---------------- +InternLM uses the `argparse `_ library to supply commandline +configuration to the InternLM runtime. 
Use ``internlm.initialize.get_default_parser()`` to get InternLM's default +parser with some builtin arguments, users can add custom parameters to this parser. + +.. code-block:: python + + # Get InternLM default parser + parser = internlm.initialize.get_default_parser() + # Add new argument + parser.add_argument("--user_arg", type=int, default=-1, help="arguments add by user.") + cmd_args = parser.parse_args() + +.. autofunction:: internlm.initialize.get_default_parser + + +.. _InternLM-init: + +Model Initialization +------------------------- + +Optimizer Initialization +------------------------- + +Dataloader Initialization +------------------------- + +Trainer Initialization +------------------------- diff --git a/doc/code-docs/source/install.md b/doc/code-docs/source/install.md new file mode 100644 index 0000000..26f57c0 --- /dev/null +++ b/doc/code-docs/source/install.md @@ -0,0 +1,70 @@ +## Installation + +### Environment Preparation +The required packages and corresponding version are shown as follows: +- Python == 3.10 +- GCC == 10.2.0 +- MPFR == 4.1.0 +- CUDA >= 11.7 +- Pytorch >= 1.13.1 +- Transformers >= 4.28.0 +- Flash-Attention >= v1.0.5 +- Apex == 23.05 +- GPU with Ampere or Hopper architecture (such as H100, A100) +- Linux OS + +After installing the above dependencies, some system environment variables need to be updated: +```bash +export CUDA_PATH={path_of_cuda_11.7} +export GCC_HOME={path_of_gcc_10.2.0} +export MPFR_HOME={path_of_mpfr_4.1.0} +export LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH +export PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH +export CC=${GCC_HOME}/bin/gcc +export CXX=${GCC_HOME}/bin/c++ +``` + +### Environment Installation +Clone the project `internlm` and its dependent submodules from the github repository, as follows: +```bash +git clone git@github.com:InternLM/InternLM.git --recurse-submodules +``` + +It is recommended to build a Python-3.10 virtual environment using conda and install the required dependencies based on the `requirements/` files: +```bash +conda create --name internlm-env python=3.10 -y +conda activate internlm-env +cd internlm +pip install -r requirements/torch.txt +pip install -r requirements/runtime.txt +``` + +Install flash-attention (version v1.0.5): +```bash +cd ./third_party/flash-attention +python setup.py install +cd ./csrc +cd fused_dense_lib && pip install -v . +cd ../xentropy && pip install -v . +cd ../rotary && pip install -v . +cd ../layer_norm && pip install -v . +cd ../../../../ +``` + +Install Apex (version 23.05): +```bash +cd ./third_party/apex +pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ +cd ../../ +``` + +### Environment Image +Users can obtain an image with the InternLM runtime environment installed from https://hub.docker.com/r/sunpengsdu/internlm. 
The commands for pulling the image and starting the container are as follows: + +```bash +# pull image +docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos +# start container +docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos +docker exec -it myinternlm bash +``` diff --git a/doc/code-docs/source/monitor.rst b/doc/code-docs/source/monitor.rst new file mode 100644 index 0000000..ff8cd1b --- /dev/null +++ b/doc/code-docs/source/monitor.rst @@ -0,0 +1,10 @@ +Monitor and Alert +================= + + +Monitoring +----------------- + + +Alerting +----------------- diff --git a/doc/code-docs/source/parallel.rst b/doc/code-docs/source/parallel.rst new file mode 100644 index 0000000..3515847 --- /dev/null +++ b/doc/code-docs/source/parallel.rst @@ -0,0 +1,23 @@ +Parallel Training +================= + +.. 整体说一下并行配置使用方式,接下来再分模块详细说明 + +Tensor Parallel +----------------- + + +Pipeline Parallel +----------------- + + +Sequence Parallel +----------------- + + +Data Parallel +----------------- + + +ZeRO1.5 +----------------- \ No newline at end of file diff --git a/doc/code-docs/source/profiler.rst b/doc/code-docs/source/profiler.rst new file mode 100644 index 0000000..c10f425 --- /dev/null +++ b/doc/code-docs/source/profiler.rst @@ -0,0 +1,11 @@ +Profiler +======== + +.. 可介绍torch profiler, memory profiler的使用 + +Torch Profiler +----------------- + + +Memory Profiler +----------------- \ No newline at end of file diff --git a/doc/code-docs/source/training.rst b/doc/code-docs/source/training.rst new file mode 100644 index 0000000..e9ee124 --- /dev/null +++ b/doc/code-docs/source/training.rst @@ -0,0 +1,2 @@ +Training API +============ \ No newline at end of file From b9202b12bcc23959c2c3b9539c363a8f323e23e8 Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Fri, 1 Sep 2023 13:24:46 +0800 Subject: [PATCH 13/34] feat(utils/writer.py): support writer add_scalars for writing dict data (#257) * feat(utils/writer.py): support writer add_scalars interface for writing dict data * feat(hybrid_zero_optim.py): change grad_norm_groups list to dict --- .../solver/optimizer/hybrid_zero_optim.py | 35 ++++++++++-------- internlm/train/training_internlm.py | 36 +++++++++++-------- internlm/utils/writer.py | 8 +++++ train.py | 5 ++- 4 files changed, 52 insertions(+), 32 deletions(-) diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 8bdeccf..63d2bfa 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -497,6 +497,7 @@ class HybridZeroOptimizer(BaseOptimizer): grads = [self.padding_grad] params = [self.padding_tensor] + norm = 0 if self._clip_grad_norm > 0: # this norm is before scaling, it will be very large norm = compute_norm( @@ -542,15 +543,15 @@ class HybridZeroOptimizer(BaseOptimizer): self._param_store.clear_grads_of_previous_reduced_params() # compute norm for gradients in the last bucket - total_norms = [] + total_norms = {} for group_id in range(self.num_param_groups): - total_norms.append( - self._compute_norm_with_stage( - group_id=group_id, - last_bucket=True, - last_stage=True, - previous_norm=groups_norms[group_id], - ) + group_name = self.param_groups[group_id]["name"] if "name" in self.param_groups[group_id] else "default" + group_name = f"{group_id}_{group_name}" + total_norms[group_name] = self._compute_norm_with_stage( + group_id=group_id, + last_bucket=True, + 
last_stage=True, + previous_norm=groups_norms[group_id], ) timer("sync_grad").start() @@ -569,7 +570,7 @@ class HybridZeroOptimizer(BaseOptimizer): # found_inf = self._check_overflow() # Because you may encounter inf when computing norm - if -1 in norms: + if -1 in norms.values(): found_inf = True loss_scale = float(self.loss_scale.item()) # backup @@ -617,15 +618,17 @@ class HybridZeroOptimizer(BaseOptimizer): # unscale and clip grads # get the global norm - global_norm_groups = [] + global_norm_groups = {} if self._clip_grad_norm > 0: - for norm in norms: - global_norm_groups.append(norm**0.5) + for group_name, norm in norms.items(): + global_norm_groups[group_name] = norm**0.5 # the following operations are performed only on the rank to which parameters are assigned. if gpc.config.model.dtype is not torch.float32: - if len(single_grad_partition_groups) != 0: - self._unscale_and_clip_grads(single_grad_partition_groups, global_norm_groups, loss_scale) + if len(single_grad_partition_groups) != 0 and self._clip_grad_norm > 0: + self._unscale_and_clip_grads( + single_grad_partition_groups, list(global_norm_groups.values()), loss_scale + ) # update the parameters timer("step").start() @@ -652,7 +655,9 @@ class HybridZeroOptimizer(BaseOptimizer): # update gradients may not be needed here, because the sync_params function is used in initialization, # so synchronization is maintained - return True, [global_norm / loss_scale for global_norm in global_norm_groups] + for group_name, global_norm in global_norm_groups.items(): + global_norm_groups[group_name] = global_norm / loss_scale + return True, global_norm_groups def broadcast_params(self): handles = [] diff --git a/internlm/train/training_internlm.py b/internlm/train/training_internlm.py index bab56f1..9c2ded0 100644 --- a/internlm/train/training_internlm.py +++ b/internlm/train/training_internlm.py @@ -389,23 +389,31 @@ def record_current_batch_training_metrics( line = "" for key, value in infos.items(): line += f"{key}={value} " - writer.add_scalar(key=key, value=value, step=train_state.step_count) + if isinstance(value, dict): + writer.add_scalars(key=key, value=value, step=train_state.step_count) + else: + writer.add_scalar(key=key, value=value, step=train_state.step_count) if update_panel: + # metrics shown with dashboard panels + panel_metrics = { + "step": batch_count, + "lr": lr, + "num_consumed_tokens": train_state.num_consumed_tokens, + "loss": loss.item(), + "flops": tflops, + "tgs": tk_per_gpu, + "acc": acc_perplex["acc"], + "perplexity": acc_perplex["perplexity"], + "fwd_bwd_time": fwd_bwd_time, + } + for norm_key, norm_value in grad_norm.items(): + panel_metrics[norm_key] = norm_value + logger.info( - line, - extra={ - "step": batch_count, - "lr": lr, - "num_consumed_tokens": train_state.num_consumed_tokens, - "grad_norm": grad_norm, - "loss": loss.item(), - "flops": tflops, - "tgs": tk_per_gpu, - "acc": acc_perplex["acc"], - "perplexity": acc_perplex["perplexity"], - "fwd_bwd_time": fwd_bwd_time, - }, + "{line}", + line=line, + extra=panel_metrics, ) else: logger.info(line) diff --git a/internlm/utils/writer.py b/internlm/utils/writer.py index 0997817..b519b95 100644 --- a/internlm/utils/writer.py +++ b/internlm/utils/writer.py @@ -134,6 +134,14 @@ class Writer: except Exception: traceback.print_exc() + def add_scalars(self, key, value, step): + try: + assert isinstance(value, dict) + if self.enable_tb and self.tb_writer is not None: + self.tb_writer.add_scalars(main_tag=key, tag_scalar_dict=value, global_step=step) + except 
Exception: + traceback.print_exc() + def add_text(self, key, value, step): try: if self.enable_tb and self.tb_writer is not None: diff --git a/train.py b/train.py index de7cc7c..902f8c0 100644 --- a/train.py +++ b/train.py @@ -6,7 +6,6 @@ import time import traceback from functools import partial -import numpy as np import torch import torch.distributed as dist @@ -236,7 +235,7 @@ def main(args): train_state.step_count += 1 else: train_state.inf_nan_skip_batches += 1 # record the amount of updating parameters unsuccessfully. - if -1 in grad_norm_groups and gpc.is_rank_for_log(): # -1 encodes a specific failure case + if -1 in grad_norm_groups.values() and gpc.is_rank_for_log(): # -1 encodes a specific failure case logger.warning(f"Warning: skip parameter update at step {batch_count}.") send_alert_message( address=gpc.config.alert_address, @@ -257,7 +256,7 @@ def main(args): trainer=trainer, start_time=start_time, loss=loss, - grad_norm=np.array(grad_norm_groups), + grad_norm=grad_norm_groups, metric=metric, update_panel=uniscale_logger is not None, ) From 860de0aa4679d1e7135ded1c03a79b0b815694ba Mon Sep 17 00:00:00 2001 From: Sun Peng Date: Fri, 1 Sep 2023 13:38:01 +0800 Subject: [PATCH 14/34] Feat/add runntime gpu test (#254) * feat: add gpu bench * feat/add allreduce runtime bench --------- Co-authored-by: sunpengsdu --- internlm/core/context/__init__.py | 2 + internlm/core/context/parallel_context.py | 6 + .../core/context/process_group_initializer.py | 55 ++++++ internlm/utils/gputest.py | 163 ++++++++++++++++++ train.py | 3 + 5 files changed, 229 insertions(+) create mode 100644 internlm/utils/gputest.py diff --git a/internlm/core/context/__init__.py b/internlm/core/context/__init__.py index 97021dc..3fc7deb 100644 --- a/internlm/core/context/__init__.py +++ b/internlm/core/context/__init__.py @@ -7,6 +7,7 @@ from .parallel_context import ( from .process_group_initializer import ( Initializer_Data, Initializer_Model, + Initializer_Nettest, Initializer_Pipeline, Initializer_Tensor, Initializer_Zero1, @@ -34,6 +35,7 @@ __all__ = [ "Initializer_Pipeline", "Initializer_Data", "Initializer_Zero1", + "Initializer_Nettest", "ProcessGroupInitializer", "Initializer_Model", "seed", diff --git a/internlm/core/context/parallel_context.py b/internlm/core/context/parallel_context.py index 87d3114..f1de5ad 100644 --- a/internlm/core/context/parallel_context.py +++ b/internlm/core/context/parallel_context.py @@ -143,6 +143,7 @@ class ParallelContext(metaclass=SingletonMeta): self.pipeline_parallel_size = 1 self.tensor_parallel_size = 1 self.zero1_parallel_size = -1 + self.nettest_parallel_size = 1 self.num_processes_on_current_node = -1 self.virtual_pipeline_parallel_size = None self.virtual_pipeline_parallel_rank = None @@ -442,6 +443,9 @@ class ParallelContext(metaclass=SingletonMeta): # instead, it should be calculated based on other parallel config self.data_parallel_size = self.world_size // (self.pipeline_parallel_size * self.tensor_parallel_size) + # the recommended nettest_parallel_size is 32 GPUs + self.nettest_parallel_size = 32 + if self.zero1_parallel_size <= 0: self.zero1_parallel_size = self.data_parallel_size @@ -454,6 +458,7 @@ class ParallelContext(metaclass=SingletonMeta): self.pipeline_parallel_size, self.tensor_parallel_size, self.zero1_parallel_size, + self.nettest_parallel_size, ] # run initialization of different process groups @@ -462,6 +467,7 @@ class ParallelContext(metaclass=SingletonMeta): initializers.append(pgroup_initializer.Initializer_Model(*initializer_args)) 
initializers.append(pgroup_initializer.Initializer_Tensor(*initializer_args)) initializers.append(pgroup_initializer.Initializer_Zero1(*initializer_args)) + initializers.append(pgroup_initializer.Initializer_Nettest(*initializer_args)) if self.pipeline_parallel_size > 1: initializers.append(pgroup_initializer.Initializer_Pipeline(*initializer_args)) for initializer in initializers: diff --git a/internlm/core/context/process_group_initializer.py b/internlm/core/context/process_group_initializer.py index 56cf16d..facb806 100644 --- a/internlm/core/context/process_group_initializer.py +++ b/internlm/core/context/process_group_initializer.py @@ -3,6 +3,7 @@ # adopted from https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context +import math from abc import ABC, abstractmethod from enum import Enum @@ -31,6 +32,9 @@ class ParallelMode(Enum): # zero1 parallel ZERO1 = "zero1" + # runtime network test + NETTEST = "nettest" + class ProcessGroupInitializer(ABC): """An object, knowing the parallelism configuration, that initializes parallel groups. @@ -52,6 +56,7 @@ pipeline_parallel_size: int, tensor_parallel_size: int, zero1_parallel_size: int, + nettest_parallel_size: int, ): self.rank = rank self.world_size = world_size @@ -59,6 +64,7 @@ self.pipeline_parallel_size = pipeline_parallel_size self.tensor_parallel_size = tensor_parallel_size self.zero1_parallel_size = zero1_parallel_size + self.nettest_parallel_size = nettest_parallel_size super().__init__() @abstractmethod @@ -332,3 +338,52 @@ class Initializer_Zero1(ProcessGroupInitializer): ranks_in_group = ranks return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode + + +class Initializer_Nettest(ProcessGroupInitializer): + """A ProcessGroupInitializer for network test, especially for NCCL. + + Args: + rank (int): The rank of current process. + world_size (int): Size of whole communication world. + nettest_parallel_size (int): Size of a network test group. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.num_nettest_group = math.ceil(self.world_size / self.nettest_parallel_size) + + def init_dist_group(self, use_cpu: bool = False): + """Initialize network test groups, and assign local_ranks and groups to each gpu. + + Returns: + Tuple (local_rank, group_world_size, process_group, ranks_in_group, mode): + A network test parallelism's information tuple. 
+ """ + local_rank = None + ranks_in_group = None + process_group = None + cpu_group = None + group_world_size = None + mode = ParallelMode.NETTEST + + for i in range(self.num_nettest_group): + ranks = [] + for j in range(self.nettest_parallel_size): + rank = i * self.nettest_parallel_size + j + if rank < self.world_size: + ranks.append(rank) + group = dist.new_group(ranks) + if use_cpu: + group_cpu = dist.new_group(ranks, backend="gloo") if dist.get_backend() != "gloo" else group + else: + group_cpu = None + + if self.rank in ranks: + local_rank = ranks.index(self.rank) + group_world_size = len(ranks) + process_group = group + cpu_group = group_cpu + ranks_in_group = ranks + + return local_rank, group_world_size, process_group, cpu_group, ranks_in_group, mode diff --git a/internlm/utils/gputest.py b/internlm/utils/gputest.py new file mode 100644 index 0000000..27ae9bd --- /dev/null +++ b/internlm/utils/gputest.py @@ -0,0 +1,163 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +import math +import socket + +import torch +import torch.distributed as dist +from flash_attn.modules.mha import FlashSelfAttention, SelfAttention +from torch.utils import benchmark + +from internlm.utils.logger import get_logger + +try: + import GPUtil + import psutil +except ImportError: + GPUtil, psutil = None, None + +from internlm.core.context import ParallelMode +from internlm.core.context import global_context as gpc +from internlm.utils.common import get_current_device + +logger = get_logger(__file__) + + +def benchmark_forward( + test_fn, + *inputs, + repeats=100, + amp=True, + amp_dtype=torch.float16, + **kwinputs, +): + """Use Pytorch Benchmark on the forward pass of an arbitrary function.""" + + def amp_wrapper(*inputs, **kwinputs): + with torch.autocast(device_type="cuda", dtype=amp_dtype, enabled=amp): + test_fn(*inputs, **kwinputs) + + bench_timer = benchmark.Timer( + stmt="test_fn_amp(*inputs, **kwinputs)", + globals={"test_fn_amp": amp_wrapper, "inputs": inputs, "kwinputs": kwinputs}, + num_threads=torch.get_num_threads(), + ) + used_time = bench_timer.timeit(repeats) + return used_time.mean + + +def flops(batch, seqlen, headdim, nheads, time_f): + """Compute the flops value of a GPU with give flashattention function""" + + flop = 4 * batch * seqlen**2 * nheads * headdim + return (flop / time_f / 10**12) if not math.isnan(time_f) else 0.0 + + +def get_gpu_temperature(): + """Get current GPU temperature.""" + try: + gpu_id = torch.cuda.current_device() + except AssertionError: + gpu_id = -1 + + if GPUtil is not None and gpu_id >= 0: + gpus = GPUtil.getGPUs() + gpu_temperature = gpus[gpu_id].temperature + else: + gpu_temperature = -1 + + return gpu_temperature + + +def get_cpu_temperature(): + """Get current CPU temperature.""" + + if psutil is not None: + cpu_temperature = psutil.sensors_temperatures()["coretemp"][0].current + else: + cpu_temperature = -1 + + return cpu_temperature + + +def bench_net(): + """Benchmark nccl performance for slow node detection.""" + + if gpc.get_world_size(ParallelMode.GLOBAL) <= 1: + return + + if gpc.is_rank_for_log(): + logger.info("benchmarking network speed ...") + + repeats = 100 + input_data = torch.randn( + 8 * 1024 * 1024, + device=get_current_device(), + dtype=torch.bfloat16, + ) + + def allreduce_fn(inputs): + dist.all_reduce(inputs, op=torch.distributed.ReduceOp.AVG, group=gpc.get_group(ParallelMode.NETTEST)) + + bench_timer = benchmark.Timer( + stmt="test_fn_amp(inputs)", + globals={"test_fn_amp": allreduce_fn, "inputs": input_data}, + 
num_threads=torch.get_num_threads(), + ) + allreduce_time = bench_timer.timeit(repeats).mean + allreduce_time = allreduce_time * 10**3 + allreduce_time_this = allreduce_time + allreduce_time = torch.Tensor([allreduce_time]).to(device=get_current_device()) + dist.all_reduce(allreduce_time, group=gpc.get_group(ParallelMode.GLOBAL)) + allreduce_time_avg = allreduce_time / gpc.get_world_size(ParallelMode.GLOBAL) + allreduce_time_avg = float(allreduce_time_avg.item()) + + if allreduce_time_this >= allreduce_time_avg * 1.05: + logger.warning( + f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} NCCL test is slower than avg, " + f"Hostname {socket.gethostname()}, " + f"allreduce_time {allreduce_time_this:.2f}, avg {allreduce_time_avg:.2f}, " + f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" + ) + + +def bench_gpu(use_flash_attn=True): + """Benchmark single GPU performance for slow node detection.""" + + if gpc.is_rank_for_log(): + logger.info("benchmarking gpu speed ...") + + headdim = 64 + dim = 2048 + batch_size, seqlen = 2, 1024 + nheads = dim // headdim + + inner_attn = FlashSelfAttention if use_flash_attn else SelfAttention + inner_attn = inner_attn(causal=True, softmax_scale=None, attention_dropout=0) + + qkv = torch.randn( + batch_size, + seqlen, + 3, + dim // headdim, + headdim, + device=get_current_device(), + dtype=torch.float16, + requires_grad=True, + ) + time_f = benchmark_forward(inner_attn, qkv) + speed = flops(batch_size, seqlen, headdim, nheads, time_f) + speed_this = speed + speed = torch.Tensor([speed]).to(device=get_current_device()) + dist.all_reduce(speed, group=gpc.get_group(ParallelMode.GLOBAL)) + speed_avg = speed / gpc.get_world_size(ParallelMode.GLOBAL) + speed_avg = float(speed_avg.item()) + + if speed_this <= speed_avg * 0.95: + logger.warning( + f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} GPU is slower than avg, " + f"Hostname {socket.gethostname()}, " + f"tflops {speed_this:.2f}, avg {speed_avg:.2f}, " + f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" + ) diff --git a/train.py b/train.py index 902f8c0..69cdd3c 100644 --- a/train.py +++ b/train.py @@ -35,6 +35,7 @@ from internlm.utils.common import ( parse_args, ) from internlm.utils.evaluation import evaluate_on_val_dls +from internlm.utils.gputest import bench_gpu, bench_net from internlm.utils.logger import get_logger, initialize_uniscale_logger from internlm.utils.megatron_timers import megatron_timer as timer from internlm.utils.model_checkpoint import CheckpointManager @@ -196,6 +197,8 @@ def main(args): for batch_count in range(train_state.batch_count, total_steps): if batch_count % 50 == 0: torch.cuda.empty_cache() + bench_gpu() + bench_net() start_time = time.time() timer("one-batch").start() From 74afbb0c772422ac02c61b836c1d4a6e6bebb5d4 Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Fri, 1 Sep 2023 15:46:33 +0800 Subject: [PATCH 15/34] feat(code-docs): add enum_tools in code-docs requirements (#264) Co-authored-by: huangting4201 --- doc/code-docs/requirements.txt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/code-docs/requirements.txt b/doc/code-docs/requirements.txt index 9a4bb3d..8cb9316 100644 --- a/doc/code-docs/requirements.txt +++ b/doc/code-docs/requirements.txt @@ -3,4 +3,5 @@ sphinx-autobuild recommonmark sphinx_rtd_theme sphinx_markdown_tables -autodoc_pydantic==1.9 \ No newline at end of file +autodoc_pydantic==1.9 +enum_tools \ No newline at end of file From 3d091c302d4b9cf47e7d109701d471e83ebbaec4 
Mon Sep 17 00:00:00 2001 From: huangting4201 <1538303371@qq.com> Date: Mon, 4 Sep 2023 10:20:06 +0800 Subject: [PATCH 16/34] fix(doc/code-docs): autodoc shown error (#265) * feat(code-docs): test auto doc * feat(code-docs): test auto doc * feat(code-docs): test auto doc * feat(code-docs): test auto doc --- doc/code-docs/requirements.txt | 5 ++++- doc/code-docs/source/conf.py | 33 +++++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/doc/code-docs/requirements.txt b/doc/code-docs/requirements.txt index 8cb9316..8cbfddf 100644 --- a/doc/code-docs/requirements.txt +++ b/doc/code-docs/requirements.txt @@ -4,4 +4,7 @@ recommonmark sphinx_rtd_theme sphinx_markdown_tables autodoc_pydantic==1.9 -enum_tools \ No newline at end of file +enum_tools +numpy +torch +tqdm \ No newline at end of file diff --git a/doc/code-docs/source/conf.py b/doc/code-docs/source/conf.py index 5986f06..4bce035 100644 --- a/doc/code-docs/source/conf.py +++ b/doc/code-docs/source/conf.py @@ -42,6 +42,22 @@ autodoc_pydantic_model_summary_list_order = "bysource" autodoc_pydantic_model_member_order = "bysource" autodoc_pydantic_field_list_validators = False +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_init_with_doc = False +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True +napoleon_preprocess_types = False +napoleon_type_aliases = None +napoleon_attr_annotations = True + templates_path = ["_templates"] exclude_patterns = [] @@ -52,11 +68,24 @@ exclude_patterns = [] html_theme = "sphinx_rtd_theme" html_static_path = ["_static"] +# GitHub integration +html_context = { + "display_github": True, + "github_user": "pjlab", + "github_repo": "InternLM", + "github_version": "master", + "conf_py_path": "/doc/code-docs/source/", +} + sys.path.insert(0, os.path.abspath("../../../")) # Prepend module names to class descriptions add_module_names = True -autoclass_content = "init" +autoclass_content = "class" -autodoc_mock_imports = ["apex", "torch"] +autodoc_mock_imports = [ + "apex", + "torch", + "numpy", +] From 5238f15e2d1bc08dc9669f8950f32dd6d2b889c1 Mon Sep 17 00:00:00 2001 From: Shuo Zhang Date: Mon, 4 Sep 2023 23:14:07 +0800 Subject: [PATCH 17/34] fix(eval): no need to check length of valid_dl when using streaming dataset (#274) * fix(eval): StreamingDataset does not have an __len__ method. * fix(eval): StreamingDataset has no len method --- internlm/utils/evaluation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internlm/utils/evaluation.py b/internlm/utils/evaluation.py index 872ef87..2b9a384 100644 --- a/internlm/utils/evaluation.py +++ b/internlm/utils/evaluation.py @@ -136,7 +136,7 @@ def evaluate_on_val_dls( dist.barrier() val_res = val_metric.get_metric() - if verbose and len(val_dl) != 0: + if verbose and (streaming or len(val_dl) != 0): val_loss = val_loss / (val_idx + 1 + 1e-6) infos = { "step": step_count, From f6e007f95bbda90e6eae517d2ec732cb6dafc633 Mon Sep 17 00:00:00 2001 From: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Date: Tue, 5 Sep 2023 17:40:48 +0800 Subject: [PATCH 18/34] feat(ckpt): fix checkpoint bugs and add feature enhancements. (#259) * fix(ckpt): ckpt bug fix and api refactor 1. fix latest ckpt query bug 2. 
add ckpt unit test 3. fix storage manager boto3/local client get_fns bug 4. fix only model load case zero fp32 buffer overwrite model weights bug. 5. add ckpt_type and add zero reload ci-test * fix(ckpt): fix ckpt and trainer bug * fix and refactor * fix base on comment * feat: add legacy api --- configs/7B_sft.py | 11 +- doc/en/usage.md | 10 +- doc/usage.md | 9 +- internlm/core/trainer.py | 43 +- internlm/initialize/launch.py | 41 +- internlm/initialize/legacy/__init__.py | 0 internlm/initialize/legacy/launch.py | 40 ++ internlm/solver/optimizer/__init__.py | 4 +- .../solver/optimizer/hybrid_zero_optim.py | 14 + internlm/utils/model_checkpoint.py | 434 ++++++++++++------ internlm/utils/storage_manager.py | 72 ++- tests/test_utils/common_fixture.py | 143 ++++++ tests/test_utils/test_model_checkpoint.py | 278 +++++++++++ tests/test_utils/test_storage_manager.py | 26 ++ train.py | 32 +- 15 files changed, 931 insertions(+), 226 deletions(-) create mode 100644 internlm/initialize/legacy/__init__.py create mode 100644 internlm/initialize/legacy/launch.py create mode 100644 tests/test_utils/common_fixture.py create mode 100644 tests/test_utils/test_model_checkpoint.py create mode 100644 tests/test_utils/test_storage_manager.py diff --git a/configs/7B_sft.py b/configs/7B_sft.py index 1f1993f..0ccc5e0 100644 --- a/configs/7B_sft.py +++ b/configs/7B_sft.py @@ -22,13 +22,16 @@ CHECKPOINT_EVERY = 50 ckpt = dict( enable_save_ckpt=False, # enable ckpt save. save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to save training ckpt. - # load_ckpt_folder=LOAD_CKPT_FOLDER, # Ckpt path to resume training(load weights and scheduler/context states). - # load_model_only_folder=MODEL_ONLY_FOLDER, # Path to initialize with given model weights. - load_optimizer=True, # Wheter to load optimizer states when continuing training. + # load_ckpt_folder= dict(path=MODEL_ONLY_FOLDER, content=["model"], ckpt_type="normal"), + load_ckpt_folder="local:llm_ckpts/", + # 'load_ckpt_info' setting guide: + # 1. the 'path' indicate ckpt path, + # 2. the 'content‘ means what states will be loaded, support: "model", "sampler", "optimizer", "scheduler", "all" + # 3. the ’ckpt_type‘ means the type of checkpoint to be loaded, now only 'normal' type is supported. + load_ckpt_info=dict(path=MODEL_ONLY_FOLDER, content=("model",), ckpt_type="internlm"), checkpoint_every=CHECKPOINT_EVERY, async_upload=True, # async ckpt upload. (only work for boto3 ckpt) async_upload_tmp_folder="/dev/shm/internlm_tmp_ckpt/", # path for temporarily files during asynchronous upload. - snapshot_ckpt_folder="/".join([SAVE_CKPT_FOLDER, "snapshot"]), # directory for snapshot ckpt storage path. oss_snapshot_freq=int(CHECKPOINT_EVERY / 2), # snapshot ckpt save frequency. 
) diff --git a/doc/en/usage.md b/doc/en/usage.md index e286edc..c10ed3e 100644 --- a/doc/en/usage.md +++ b/doc/en/usage.md @@ -115,19 +115,19 @@ If you want to load a model checkpoint when starting the training, you can confi ```python SAVE_CKPT_FOLDER = "local:/path/to/save/ckpt" -MODEL_ONLY_FOLDER = "local:/path/to/load/init/model/ckpt" LOAD_CKPT_FOLDER = "local:/path/to/load/resume/ckpt" ckpt = dict( save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to save the model and optimizer checkpoints checkpoint_every=float("inf"), # Save a checkpoint every specified number of steps, default value is inf - load_model_only_folder=MODEL_ONLY_FOLDER, # Path to load the initial model weights, only load model weights without loading optimizer weights, training will start from the first step - load_ckpt_folder=LOAD_CKPT_FOLDER, # Path to load the weights of the model and optimizer for resuming training, training will resume from the specified step - load_optimizer=True, # Whether to load optimizer weights when resuming training, default value is True + # When resuming training from a breakpoint,: + # (1) 'path' is the path of the loaded checkpoint. + # (2) 'content' indicates which state will be loaded, support: "model", "sampler", "optimizer", "scheduler", "all" + # (3) 'ckpt_type' indicates which type ckpt will be loaded, currently supported: "internlm" + load_ckpt_info=dict(path=MODEL_ONLY_FOLDER, content=("model",), ckpt_type="internlm"), ) ``` Note: -- `load_model_only_folder` and `load_ckpt_folder` cannot be set at the same time. - If the path starts with `local:`, it means the file is stored in the local file system. If it starts with `boto3:`, it means the file is stored in the remote OSS. The configuration for the model is as follows: diff --git a/doc/usage.md b/doc/usage.md index 11a4394..ea43084 100644 --- a/doc/usage.md +++ b/doc/usage.md @@ -103,18 +103,17 @@ data = dict( 如果在启动训练时要加载模型 `checkpoint`,可进行如下相关配置: ```python SAVE_CKPT_FOLDER = "local:/path/to/save/ckpt" -MODEL_ONLY_FOLDER = "local:/path/to/load/init/model/ckpt" LOAD_CKPT_FOLDER = "local:/path/to/load/resume/ckpt" ckpt = dict( save_ckpt_folder=SAVE_CKPT_FOLDER, # 存储模型和优化器 checkpoint 的路径 checkpoint_every=float("inf"), # 每多少个 step 存储一次 checkpoint,默认值为 inf - load_model_only_folder=MODEL_ONLY_FOLDER, # 加载模型初始权重的路径,只加载模型权重,不加载优化器权重,训练将从第一个 step 开始 - load_ckpt_folder=LOAD_CKPT_FOLDER, # 断点续训时,加载模型和优化器等权重的路径,将从指定的 step 恢复训练 - load_optimizer=True, # 断点续训时,是否需要加载优化器权重,默认值为 True + # 断点续训时,加载模型和优化器等权重的路径,将从指定的 step 恢复训练 + # content 表示哪些状态会被加载,支持: "model", "sampler", "optimizer", "scheduler", "all" + # ckpt_type 表示加载的模型类型,目前支持: "internlm" + load_ckpt_info=dict(path=MODEL_ONLY_FOLDER, content=("model",), ckpt_type="internlm"), ) ``` 注意: -- `load_model_only_folder`与`load_ckpt_folder`不能同时设置 - 路径若以 `local:` 为前缀,则存储在本地文件系统;若以 `boto3:` 为前缀,则存储在远程 oss 上 模型相关关键参数配置如下所示: diff --git a/internlm/core/trainer.py b/internlm/core/trainer.py index 2839ad9..aaf4543 100644 --- a/internlm/core/trainer.py +++ b/internlm/core/trainer.py @@ -23,7 +23,15 @@ class TrainState: train_dl (DataLoader): The DataLoader object used for training. """ - def __init__(self, config) -> None: + def __init__(self, config, batch_sampler) -> None: + """ + Args: + config (Config): internlm config + batch_sampler (torch.utils.data.Sampler): Because the dataloader loading is + asynchronous and prefetched, the batch_sampler state maintained inside the + dataloader are faster then the actual training progress, so we copy the + batch_sampler as the anchor point of ckpt reload. 
+ """ # The number of batches produced by the data iterator self.batch_count: int = 0 # Used to store the number of samples consumed in the current epoch @@ -43,9 +51,20 @@ class TrainState: self.tensorboard_folder = config.tensorboard_folder - def init_batch_sampler(self, train_dl): - # Copy of the batch sampler from the DataLoader - self.batch_sampler = train_dl.batch_sampler.copy() + # learning rate + self.lr = config.adam.lr + + # smapler state + if batch_sampler: + self.init_batch_sampler(batch_sampler) + + def init_batch_sampler(self, batch_sampler): + """ + Args: + batch_sampler (torch.utils.data.Sampler): sampler. + """ + # make a copy of batch_sampler. + self.batch_sampler = batch_sampler.copy() # Iterator for the batch sampler self.batch_sampler_iter = iter(self.batch_sampler) @@ -61,26 +80,22 @@ class TrainState: return json.dumps(info, indent=4, sort_keys=True) - def load_state_dict(self, other_stuffs, train_dl): + def load_state_dict(self, other_stuffs): """ Resumes training from a checkpoint. Args: other_stuffs (dict): Other information needed to resume training. - train_dl (DataLoader): The DataLoader object used for training. """ - - self.batch_count = other_stuffs["batch_count"] + 1 # here you need to shift a batch backward self.num_consumed_samples_in_epoch = other_stuffs["num_consumed_samples_in_epoch"] self.num_consumed_tokens = other_stuffs["num_consumed_tokens"] self.inf_nan_skip_batches = other_stuffs["inf_nan_skip_batches"] - # compatible with previous checkpoints without this parameter - self.step_count = other_stuffs.get("step_count", other_stuffs["batch_count"]) + 1 - # track the actual updates of sampler when using weighted sampling - if hasattr(self, "batch_sampler"): - self.batch_sampler = train_dl.batch_sampler.copy() - self.batch_sampler_iter = iter(self.batch_sampler) + # Because the ckpt save occurs after updating 'step_count', + # there is no need to increment 'step_count' here (Does our step count start from 0 ?), + # However, 'batch_count' is updating before ckpt storage, so it need to inc 1 when resume. + self.batch_count = other_stuffs["batch_count"] + 1 # here you need to shift a batch backward + self.step_count = other_stuffs.get("step_count", self.batch_count) # resume tensorboard from older tensorboard_folder self.resume_tb_folder = other_stuffs.get("tensorboard_folder", None) diff --git a/internlm/initialize/launch.py b/internlm/initialize/launch.py index a69a506..b446934 100644 --- a/internlm/initialize/launch.py +++ b/internlm/initialize/launch.py @@ -12,7 +12,6 @@ from internlm.core.context import Config from internlm.core.context import global_context as gpc from internlm.utils.common import get_master_node from internlm.utils.logger import get_logger -from internlm.utils.storage_manager import init_storage_manager logger = get_logger(__file__) @@ -111,7 +110,7 @@ def args_sanity_check(): # processing the checkpoint config ckpt = gpc.config.ckpt if "enable_save_ckpt" not in ckpt: - ckpt._add_item("enable_save_ckpt", False) + ckpt._add_item("enable_save_ckpt", True) # Saving checkpoint args. if ckpt.enable_save_ckpt: @@ -137,9 +136,6 @@ def args_sanity_check(): if not ckpt.async_upload: ckpt._add_item("async_upload_tmp_folder", None) - if "snapshot_ckpt_folder" not in ckpt: - ckpt._add_item("snapshot_ckpt_folder", os.path.join(ckpt.save_ckpt_folder, "snapshot")) - if "oss_snapshot_freq" not in ckpt: ckpt._add_item("oss_snapshot_freq", float("inf")) # if oss_snapshot_freq not given, we disable. 
else: @@ -149,44 +145,23 @@ def args_sanity_check(): ckpt._add_item("async_upload", False) ckpt._add_item("async_upload_tmp_folder", None) ckpt._add_item("snapshot_ckpt_folder", None) - ckpt._add_item("snapshot_ckpt_folder", None) - - # Loading checkpoint args. - if "load_model_only_folder" not in ckpt: - ckpt._add_item("load_model_only_folder", None) if "load_ckpt_folder" not in ckpt: ckpt._add_item("load_ckpt_folder", None) - if "load_optimizer" not in ckpt: - ckpt._add_item("load_optimizer", True) - if "stop_file_path" not in ckpt: ckpt._add_item("stop_file_path", None) - if "load_given_ckpt" not in ckpt: - # If 'load_given_ckpt' is not given, we set it to False, so internlm can have opportunity + if "auto_resume" not in ckpt: + # If 'auto_resume' is not given, we set it to True, so internlm can have opportunity # to auto-load latest checkpoint. - ckpt._add_item("load_given_ckpt", False) - - if ckpt.load_given_ckpt: - # Priority: load_given_ckpt(True) > latest_checkpoint > load_model_only_folder - if ckpt.load_ckpt_folder and ckpt.load_model_only_folder: - logger.warning( - "Detect 'load_ckpt_folder' and 'load_model_only_folder' set at the same time, \ -and 'load_given_ckpt' is True, so internlm will load from 'load_ckpt_folder'" - ) - ckpt.load_model_only_folder = None + ckpt._add_item("auto_resume", True) if gpc.is_rank_for_log(): logger.info("+" * 15 + " Ckpt Info " + "+" * 15) # pylint: disable=W1201 logger.info(f"is enable save ckpt: {ckpt.enable_save_ckpt}") logger.info(f"save_ckpt_folder: {ckpt.save_ckpt_folder}") logger.info(f"checkpoint_every: {ckpt.checkpoint_every}") - logger.info(f"load_given_ckpt: {ckpt.load_given_ckpt}") - - # initialization storage manager - init_storage_manager(ckpt) # tensorboard writer config if "enable_tb" not in gpc.config: @@ -459,3 +434,11 @@ def initialize_distributed_env( if args_check: args_sanity_check() + + +def get_config_value(config, key, defalut): + try: + value = config[key] + except KeyError: + value = defalut + return value diff --git a/internlm/initialize/legacy/__init__.py b/internlm/initialize/legacy/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/internlm/initialize/legacy/launch.py b/internlm/initialize/legacy/launch.py new file mode 100644 index 0000000..8313654 --- /dev/null +++ b/internlm/initialize/legacy/launch.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# -*- encoding: utf-8 -*- + +from internlm.initialize.launch import get_config_value +from internlm.utils.logger import get_logger + +logger = get_logger(__file__) + + +def auto_resume_sanity_check(ckpt_config): + load_given_ckpt = get_config_value(ckpt_config, "load_given_ckpt", None) + if load_given_ckpt is None: + return True # default value is True + else: + return not load_given_ckpt + + +def ckpt_info_sanity_check(ckpt_config): + load_ckpt_folder = get_config_value(ckpt_config, "load_ckpt_folder", None) + + load_model_only_folder = get_config_value(ckpt_config, "load_model_only_folder", None) + + if load_model_only_folder is not None: + assert ( + load_ckpt_folder is None + ), "Detect 'load_ckpt_folder' and 'load_model_only_folder' set at the same time, \ +# and 'load_given_ckpt' is True, so internlm will load from 'load_ckpt_folder'" + return dict(path=load_model_only_folder, content=("model",), ckpt_type="internlm") + else: + load_optimizer = get_config_value(ckpt_config, "load_optimizer", True) + + if isinstance(load_ckpt_folder, str): + if load_optimizer: + return dict(path=load_ckpt_folder, content=("model", "sampler", "optimizer"), 
ckpt_type="internlm") + else: + return dict(path=load_ckpt_folder, content=("model", "sampler"), ckpt_type="internlm") + elif load_ckpt_folder is None: + return None + else: + assert f"Unsupport data type:'{type(load_ckpt_folder)}' for config.ckpt arg: 'load_ckpt_folder'" diff --git a/internlm/solver/optimizer/__init__.py b/internlm/solver/optimizer/__init__.py index 3da5bbe..99051f4 100644 --- a/internlm/solver/optimizer/__init__.py +++ b/internlm/solver/optimizer/__init__.py @@ -1,6 +1,6 @@ #!/usr/bin/env python # -*- encoding: utf-8 -*- -from .hybrid_zero_optim import HybridZeroOptimizer +from .hybrid_zero_optim import HybridZeroOptimizer, reload_zero_fp32_buff -__all__ = ["HybridZeroOptimizer"] +__all__ = ["HybridZeroOptimizer", "reload_zero_fp32_buff"] diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 63d2bfa..6ba9af3 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -775,3 +775,17 @@ class HybridZeroOptimizer(BaseOptimizer): if "zero_devide_optim_plan" in states: self.params_per_rank_id_dict = states["zero_devide_optim_plan"] + + +def reload_zero_fp32_buff(optimizer): + # If we use AMP optimizer, we need to update its fp32 buffer as newly loaded weights value. + # Or we must ensure that loading model weights must be done before zero is initialized. + if isinstance(optimizer, HybridZeroOptimizer): + for group_id, param_group in enumerate(optimizer.optim.param_groups): + if optimizer.param_group_has_params[group_id]: + # flatten fp16 params have already been updated by 'load_model_checkpoint' + fp16_flat_current_rank = optimizer._param_store.get_flat_fp16_param_by_rank_group( + optimizer._zero_local_rank, group_id + ) + # param_group["params"] is fp32 flatten optimizer states of this zero rank. + param_group["params"][0].copy_(fp16_flat_current_rank.float()) diff --git a/internlm/utils/model_checkpoint.py b/internlm/utils/model_checkpoint.py index 09bafa5..87a1fb4 100644 --- a/internlm/utils/model_checkpoint.py +++ b/internlm/utils/model_checkpoint.py @@ -3,37 +3,135 @@ import copy import fcntl +import inspect import os import socket import time from enum import Enum -from typing import Dict +from typing import Callable, Dict, Union import torch from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc from internlm.core.trainer import TrainState +from internlm.initialize.launch import get_config_value +from internlm.initialize.legacy.launch import ( + auto_resume_sanity_check, + ckpt_info_sanity_check, +) from internlm.monitor import send_alert_message -from internlm.solver.optimizer import HybridZeroOptimizer +from internlm.solver.optimizer import HybridZeroOptimizer, reload_zero_fp32_buff from internlm.utils.common import get_current_device from internlm.utils.logger import get_logger from internlm.utils.megatron_timers import megatron_timer as timer from internlm.utils.storage_manager import ( get_fns, get_storage_manager, + init_storage_manager, llm_load, llm_save, + try_get_storage_backend, ) logger = get_logger(__file__) -class CheckpointType(Enum): +class CheckpointSaveType(Enum): NORMAL_CHECKPOINT = 1 SNAPSHOT_CHECKPOINT = 2 +class CheckpointLoadType(Enum): + INTERNLM = "internlm" + + +# The load method implemented by internlm by default does not use string representation types, +# but uses enumeration types defined in advance. 
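+# For example, a user config like load_ckpt_info = dict(path="local:llm_ckpts/", content=("model",), ckpt_type="internlm") carries the string "internlm", which is looked up in LOAD_TYPE_DICT below and mapped to CheckpointLoadType.INTERNLM before the corresponding load function is resolved (the path here is only an illustrative value).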
+LOAD_TYPE_DICT = { + "internlm": CheckpointLoadType.INTERNLM, +} + + +class CheckpointLoadContent: + MODEL = "model" + SAMPLER = "sampler" + OPIMIZER = "optimizer" + SCHEDULAER = "scheduler" + + +class CheckpointLoadMethod: + """The registration class of the checkpoint loading method, + users can define their own custom ckpt loading methods.""" + + LOAD_FUNC_SIG = None + LOAD_TYPE_FUNC = {} + + @staticmethod + def convet_load_type(load_type: str) -> Union[CheckpointLoadType, str]: + if load_type.lower() in LOAD_TYPE_DICT: + # The ckpt load method implemented by internlm by default. + return LOAD_TYPE_DICT[load_type.lower()] + else: + # If it is a user-defined field, we do not do any conversion and represent it as a string. + return load_type + + @staticmethod + def register_ckpt_load_type(load_type: Union[str, CheckpointLoadType], load_func: Callable): + if load_type in CheckpointLoadMethod.LOAD_TYPE_FUNC: + logger.warning(f"{load_type} has aleady been registed!") + return + + CheckpointLoadMethod.LOAD_TYPE_FUNC.update({load_type: load_func}) + + if load_type == CheckpointLoadType.INTERNLM: + CheckpointLoadMethod.LOAD_FUNC_SIG = inspect.signature(load_func) + else: + if inspect.signature(load_func) != CheckpointLoadMethod.LOAD_FUNC_SIG: + logger.warning( + f"registe load model ckpt signature is not same with: {CheckpointLoadMethod.LOAD_FUNC_SIG}" + ) + + @staticmethod + def get_ckpt_load_type_func(load_type: Union[str, CheckpointLoadType]): + return CheckpointLoadMethod.LOAD_TYPE_FUNC[load_type] + + +class CheckpointLoadMask: + """ + According to the content field in the incoming ckpt_info, decide which components to load. + """ + + LOAD_CONTENT_DICT = { + "model": CheckpointLoadContent.MODEL, + "sampler": CheckpointLoadContent.SAMPLER, + "optimizer": CheckpointLoadContent.OPIMIZER, + "scheduler": CheckpointLoadContent.SCHEDULAER, + } + + def __init__(self, content: tuple) -> None: + self.load_set = set(map(lambda x: x.lower(), content)) + if "all" in self.load_set: + self.load_set = set(CheckpointLoadMask.LOAD_CONTENT_DICT.values()) + else: + self.load_set = set(map(lambda x: CheckpointLoadMask.LOAD_CONTENT_DICT[x.lower()], content)) + + def need_load(self, content: CheckpointLoadContent): + return content in self.load_set + + def not_only_load(self, content: CheckpointLoadContent): + return content in self.load_set and len(self.load_set) > 1 + + def only_load(self, content: CheckpointLoadContent): + return set(content) == self.load_set + + def __str__(self) -> str: + return f"{self.load_set}." + + def __repr__(self) -> str: + return f"{self.load_set}." + + def get_model_topology(model): """ Returns: @@ -55,6 +153,66 @@ def get_model_topology(model): return topos +def try_load_internlm_ckpt(ckpt_mm, load_info, train_state: TrainState): + load_content_str = "" + load_ckpt_folder = load_info["path"] + load_content: CheckpointLoadMask = load_info["content"] + + if gpc.is_rank_for_log(): + logger.info(f"Try load_ckpt_folder: {load_ckpt_folder}") + + if load_content.need_load(CheckpointLoadContent.MODEL): + load_model_checkpoint(folder=load_ckpt_folder, model=ckpt_mm.model) + load_content_str += f"{CheckpointLoadContent.MODEL}, " + + if load_content.not_only_load(CheckpointLoadContent.MODEL): + # load training states. + load_context(load_ckpt_folder, train_state) + + # load optimzier states. 
+ if load_content.need_load(CheckpointLoadContent.OPIMIZER): + load_optimizer_checkpoint(load_ckpt_folder, ckpt_mm.optimizer) + load_content_str += f"{CheckpointLoadContent.OPIMIZER}, " + else: + if gpc.is_rank_for_log(): + logger.warning("CheckpointManager has no 'optimizer', skip reload optim checkpoint!") + + # load lr scheduler states. + if load_content.need_load(CheckpointLoadContent.SCHEDULAER): + if ckpt_mm.lr_scheduler: + load_scheduler(load_ckpt_folder, ckpt_mm.lr_scheduler, ckpt_mm.optimizer, train_state) + load_content_str += f"{CheckpointLoadContent.SCHEDULAER}, " + else: + if gpc.is_rank_for_log(): + logger.warning("CheckpointManager has no 'lr_scheduler', skip reload lr_scheduler checkpoint!") + + # load dataloader sampler states. + if load_content.need_load(CheckpointLoadContent.SAMPLER): + if hasattr(train_state, "batch_sampler") and not isinstance( + train_state.batch_sampler, torch.utils.data.sampler.BatchSampler + ): + load_sampler(load_ckpt_folder, ckpt_mm.train_dl.batch_sampler) + # track the actual updates of sampler when using weighted sampling + train_state.init_batch_sampler(ckpt_mm.train_dl.batch_sampler) + load_content_str += f"{CheckpointLoadContent.SAMPLER}, " + else: + if gpc.is_rank_for_log(): + logger.warning("CheckpointManager skip reload 'batch_sampler'") + + # reload data state dict. + if hasattr(train_state, "data_state_dict"): + ckpt_mm.train_dl.dataset.load_state_dict( + llm_load(os.path.join(load_ckpt_folder, "sampler_0.pt")), ckpt_path=load_ckpt_folder + ) + load_content_str += f"{CheckpointLoadContent.SAMPLER}, " + else: + if gpc.is_rank_for_log(): + logger.warning( + "CheckpointManager has no 'data_state_dict', skip reload data_state_dict checkpoint!" + ) + return load_content_str + + def save_model_checkpoint(folder, model): """ Save the model according to the relationship between tp and dp. The principle is that the data of each tp @@ -233,15 +391,16 @@ def load_sampler(ckpt_path: str, sampler): torch.cuda.empty_cache() -def load_context(ckpt_path: str, train_dl, train_state: TrainState): +def load_context(ckpt_path: str, train_state: TrainState): context_stuffs = llm_load(os.path.join(ckpt_path, "context.pt")) - train_state.load_state_dict(context_stuffs, train_dl) + train_state.load_state_dict(context_stuffs) if gpc.is_rank_for_log(): logger.info(f"reload train_state:{train_state}") torch.cuda.empty_cache() -def load_scheduler(ckpt_path: str, lr_scheduler, optimizer, learning_rate, train_state: TrainState): +def load_scheduler(ckpt_path: str, lr_scheduler, optimizer, train_state: TrainState): + learning_rate = train_state.lr scheduler_states = llm_load(os.path.join(ckpt_path, "schedulder.pt")) if learning_rate != scheduler_states["base_lrs"][0] and gpc.is_rank_for_log(): logger.warning( @@ -270,7 +429,17 @@ def load_scheduler(ckpt_path: str, lr_scheduler, optimizer, learning_rate, train class CheckpointManager: """StorageManagerContext""" - def __init__(self, ckpt_config, model, model_config=None, model_config_file=None, feishu_address=None) -> None: + def __init__( + self, + ckpt_config, + model, + train_dl=None, + optimizer=None, + lr_scheduler=None, + model_config=None, + model_config_file=None, + feishu_address=None, + ) -> None: """ CheckpointManager is used to decide when to store ckpt. If it is an asynchronous upload mode, you must call wait_async_upload_finish at the end of the program to wait @@ -283,22 +452,44 @@ class CheckpointManager: lr_scheduler (object): lr_scheduler obj. model_config (dict): model config. 
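The most consequential field in that config is load_ckpt_info; a few hedged examples of the shapes this patch accepts (paths are placeholders):

# Resume a run completely: weights, optimizer, scheduler and sampler.
load_ckpt_info = dict(path="local:/mnt/ckpts/1000", content=("all",), ckpt_type="internlm")

# Weights only, e.g. when starting an SFT run from pretrained weights.
load_ckpt_info = dict(path="boto3:s3://bucket/ckpts/1000", content=("model",), ckpt_type="internlm")

# Weights plus optimizer state, with a fresh scheduler and sampler.
load_ckpt_info = dict(path="local:/mnt/ckpts/1000", content=("model", "optimizer"), ckpt_type="internlm")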
""" - self.enable_save_ckpt = ckpt_config.enable_save_ckpt - self.checkpoint_every = ckpt_config.checkpoint_every - self.save_ckpt_folder = ckpt_config.save_ckpt_folder - self.snapshot_ckpt_folder = ckpt_config.snapshot_ckpt_folder - self.oss_snapshot_freq: int = ckpt_config.oss_snapshot_freq - self.stop_file_path = ckpt_config.stop_file_path - self.load_model_only_folder = ckpt_config.load_model_only_folder + self.enable_save_ckpt = get_config_value(ckpt_config, "enable_save_ckpt", False) + self.checkpoint_every = get_config_value(ckpt_config, "checkpoint_every", 100) + self.save_ckpt_folder = get_config_value(ckpt_config, "save_ckpt_folder", None) + self.oss_snapshot_freq: int = get_config_value(ckpt_config, "oss_snapshot_freq", 50) + self.stop_file_path = get_config_value(ckpt_config, "stop_file_path", None) + if self.save_ckpt_folder: + self.snapshot_ckpt_folder = get_config_value( + ckpt_config, "snapshot_ckpt_folder", os.path.join(self.save_ckpt_folder, "snapshot") + ) + self.async_upload_tmp_folder = get_config_value( + ckpt_config, "async_upload_tmp_folder", "/dev/shm/internlm_tmp_ckpt/" + ) + else: + self.snapshot_ckpt_folder = None + self.async_upload_tmp_folder = None + + self.async_upload = get_config_value(ckpt_config, "async_upload", False) + + # initialization storage manager + init_storage_manager(self.enable_save_ckpt, self.async_upload_tmp_folder, self.async_upload) + self.feishu_address = feishu_address self.storage_manager = get_storage_manager() self.snapshot_counter = 0 - self.load_optimizer = gpc.config.ckpt.load_optimizer self.model = model + self.optimizer = optimizer + self.lr_scheduler = lr_scheduler + self.train_dl = train_dl self.model_config = model_config self.model_config_file = model_config_file + # Register defalut internlm ckpt load type. + self.defalut_load_type_func = {CheckpointLoadType.INTERNLM: try_load_internlm_ckpt} + for ckpt_load_type in CheckpointLoadType: + CheckpointLoadMethod.register_ckpt_load_type(ckpt_load_type, self.defalut_load_type_func[ckpt_load_type]) + + # Init alter file. if self.stop_file_path and gpc.get_global_rank() == 0: dir_path = os.path.dirname(self.stop_file_path) if dir_path != "" and not os.path.exists(dir_path): @@ -306,21 +497,35 @@ class CheckpointManager: with open(self.stop_file_path, "w", encoding="utf-8") as f: f.write("0") - if ckpt_config.load_given_ckpt is False: - # Priority: load_given_ckpt(True) > latest_checkpoint > load_model_only_folder - latest_ckpt_path = self.query_lastest_ckpt() - if latest_ckpt_path: - self.load_ckpt_folder = latest_ckpt_path - else: - # At this time, we have to load model init weights and train from step 0. - self.load_ckpt_folder = self.load_model_only_folder - else: - self.load_ckpt_folder = ckpt_config.load_ckpt_folder + self.load_ckpt_info = get_config_value(ckpt_config, "load_ckpt_info", None) + if self.load_ckpt_info is None: # (legacy): Try Compatible with old interfaces + self.load_ckpt_info = ckpt_info_sanity_check(ckpt_config) - if gpc.is_rank_for_log(): - logger.info(f"load_ckpt_folder will set to :'{self.load_ckpt_folder}'") - if self.stop_file_path is None: - logger.warning("no set stop_file_path, quit_signal_handler is disable") + # Auto-reload latest checkpoint, it will overwrite the setting of 'load_ckpt_info'. 
+ self.auto_resume = get_config_value(ckpt_config, "auto_resume", None) + if self.auto_resume is None: # (legacy): Try Compatible with old interfaces + self.auto_resume = auto_resume_sanity_check(ckpt_config) + if self.auto_resume: + self.load_ckpt_info = self.query_lastest_ckpt() + + if self.stop_file_path is None and gpc.is_rank_for_log(): + logger.warning("no set stop_file_path, quit_signal_handler is disable") + + # convert to internal representation + if self.load_ckpt_info: + assert ( + "path" in self.load_ckpt_info + and "content" in self.load_ckpt_info + and "ckpt_type" in self.load_ckpt_info + ), "please set content in ckpt setting, eg: ckpt = dict(path='', content=['model'], ckpt_type='internlm')" + + # replace load_ckpt + self.load_ckpt_info["content"] = CheckpointLoadMask(self.load_ckpt_info["content"]) + self.load_ckpt_info["ckpt_type"] = CheckpointLoadMethod.convet_load_type(self.load_ckpt_info["ckpt_type"]) + + # test storage setting is ok. + if self.enable_save_ckpt: + self.try_ping_storage() def quit_signal_handler(self, train_state) -> bool: """ @@ -334,7 +539,7 @@ class CheckpointManager: Returns: bool: whether to quit. """ - now_break, now_save_ckpt, save_type = False, False, CheckpointType.NORMAL_CHECKPOINT + now_break, now_save_ckpt, save_type = False, False, CheckpointSaveType.NORMAL_CHECKPOINT if self.stop_file_path is None: return now_break, now_save_ckpt, save_type @@ -365,24 +570,29 @@ now step_count is {train_state.step_count}", return now_break, now_save_ckpt, save_type - def try_save_checkpoint(self, train_state): - if not self.enable_save_ckpt: - return False - - save_ckpts, save_type = False, CheckpointType.NORMAL_CHECKPOINT + def is_now_to_save_ckpt(self, train_state) -> (bool, CheckpointSaveType, bool): + save_ckpts, save_type, now_break = False, CheckpointSaveType.NORMAL_CHECKPOINT, False if self.oss_snapshot_freq > 1 and train_state.step_count % self.oss_snapshot_freq == 0: - save_ckpts, save_type = True, CheckpointType.SNAPSHOT_CHECKPOINT + save_ckpts, save_type = True, CheckpointSaveType.SNAPSHOT_CHECKPOINT if train_state.step_count % self.checkpoint_every == 0: - save_ckpts, save_type = True, CheckpointType.NORMAL_CHECKPOINT + save_ckpts, save_type = True, CheckpointSaveType.NORMAL_CHECKPOINT now_break, singal_save_ckpts, singal_save_type = self.quit_signal_handler(train_state) if save_ckpts is False: save_ckpts = singal_save_ckpts save_type = singal_save_type + return save_ckpts, save_type, now_break + + def try_save_checkpoint(self, train_state): + if not self.enable_save_ckpt: + return False + + save_ckpts, save_type, now_break = self.is_now_to_save_ckpt(train_state) + if save_ckpts: # Wait for the previous round of asynchronous upload storage to complete. self.storage_manager.wait() - if save_type == CheckpointType.SNAPSHOT_CHECKPOINT: + if save_type == CheckpointSaveType.SNAPSHOT_CHECKPOINT: # Snapshot number, with only two snapshots written alternately. self.snapshot_counter = (self.snapshot_counter + 1) % 2 save_ckpt_folder = os.path.join(self.snapshot_ckpt_folder, f"{self.snapshot_counter}") @@ -412,7 +622,7 @@ now step_count is {train_state.step_count}", Tuple(str, int): path of latest ckpt and ckpt step, if not found, None will return. 
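The save decision consolidated into is_now_to_save_ckpt above keeps the old precedence: snapshot saves fire on oss_snapshot_freq, and a full checkpoint wins whenever checkpoint_every also divides the step count. A toy reimplementation of that decision with the CI test's values (checkpoint_every=4, oss_snapshot_freq=2), not the method itself:

def expected_save(step_count, checkpoint_every=4, oss_snapshot_freq=2):
    save_type = None
    if oss_snapshot_freq > 1 and step_count % oss_snapshot_freq == 0:
        save_type = "SNAPSHOT_CHECKPOINT"
    if step_count % checkpoint_every == 0:
        save_type = "NORMAL_CHECKPOINT"   # a full checkpoint takes precedence
    return save_type

assert [expected_save(s) for s in range(1, 7)] == [
    None, "SNAPSHOT_CHECKPOINT", None, "NORMAL_CHECKPOINT", None, "SNAPSHOT_CHECKPOINT"
]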
""" ckpt_list = self.storage_manager.get_fns(self.save_ckpt_folder) - if len(ckpt_list) == 0: + if ckpt_list is None or len(ckpt_list) == 0: return None, None max_normal_step = 0 @@ -435,14 +645,16 @@ now step_count is {train_state.step_count}", ckpt_list_1 = self.storage_manager.get_fns(snapshot_path_0) ckpt_list_2 = self.storage_manager.get_fns(snapshot_path_1) max_step_0, max_step_1 = 0, 0 - for ckpt in ckpt_list_1: - ckpt = ckpt.strip("/") - if ckpt.endswith(".step"): - max_step_0 = max(max_step_0, int(ckpt.split(".")[0])) - for ckpt in ckpt_list_2: - ckpt = ckpt.strip("/") - if ckpt.endswith(".step"): - max_step_1 = max(max_step_1, int(ckpt.split(".")[0])) + if ckpt_list_1: + for ckpt in ckpt_list_1: + ckpt = ckpt.strip("/") + if ckpt.endswith(".step"): + max_step_0 = max(max_step_0, int(ckpt.split(".")[0])) + if ckpt_list_2: + for ckpt in ckpt_list_2: + ckpt = ckpt.strip("/") + if ckpt.endswith(".step"): + max_step_1 = max(max_step_1, int(ckpt.split(".")[0])) snap_load_path = snapshot_path_0 if max_step_0 > max_step_1 else snapshot_path_1 snap_step = max(max_step_0, max_step_1) @@ -452,11 +664,12 @@ now step_count is {train_state.step_count}", def query_latest_snapshot_step_local(self): max_step, max_step_path = 0, None - for root, _, files in os.walk(self.save_ckpt_folder, followlinks=True): + save_ckpt_folder = self.save_ckpt_folder.split(":")[1] + for root, _, files in os.walk(save_ckpt_folder, followlinks=True): for fn in files: fn = fn.strip("/") if fn.endswith(".step"): - # We assume that both normal ckpt and snapshot ckpt will store the '.step' file + # We assume that both internlm ckpt and snapshot ckpt will store the '.step' file # as an integrity flag. step = int(fn.rsplit(".", maxsplit=1)[0]) if max_step < step: @@ -466,99 +679,53 @@ now step_count is {train_state.step_count}", return max_step_path, max_step def query_lastest_ckpt(self): - latest_checkpoint = None + latest_ckpt, step = None, -1 # Training was automatically restarted by the process, forcing the latest snapshot to be read. 
if self.save_ckpt_folder: - if self.save_ckpt_folder.startswith("boto3"): - latest_checkpoint, step = self.query_latest_snapshot_step_boto3() - elif self.save_ckpt_folder.startswith("local"): - latest_checkpoint, step = self.query_latest_snapshot_step_local() - else: - latest_checkpoint, step = None, 0 + backend, _ = try_get_storage_backend(self.save_ckpt_folder) + if backend == "boto3": + latest_ckpt, step = self.query_latest_snapshot_step_boto3() + if latest_ckpt and not latest_ckpt.startswith("boto3:"): + latest_ckpt = ":".join(["boto3", latest_ckpt]) + elif backend == "local": + latest_ckpt, step = self.query_latest_snapshot_step_local() + if latest_ckpt and not latest_ckpt.startswith("local:"): + latest_ckpt = ":".join(["local", latest_ckpt]) - if latest_checkpoint is not None: - if gpc.is_rank_for_log(): - logger.info(f"Found latest ckpt : {latest_checkpoint}, step: {step}") - send_alert_message( - address=self.feishu_address, - message=f"Auto restart resume from ckpt-path: '{latest_checkpoint}', step : {step}", - ) - else: - if gpc.is_rank_for_log(): - send_alert_message( - address=self.feishu_address, - message=f"Can't find snapshot checkpoint, use default load-ckpt path: {latest_checkpoint}", - ) + if gpc.is_rank_for_log(): + logger.info(f"Found latest ckpt {latest_ckpt if latest_ckpt else 'None'}, step: {step}...") - return latest_checkpoint + return dict(path=latest_ckpt, content=("all",), ckpt_type="internlm") - def try_load_model(self, current_time=""): - model_load_path = None + def try_resume_training(self, train_state: TrainState, current_time=""): - if self.load_ckpt_folder and self.load_model_only_folder: - raise ValueError( - "Error, try to use both load_ckpt_folder and load_model_only_folder paths, \ -if you only need to load model weights (for example starting an SFT task for the first time), \ -set load_model_only_folder path, if you need to resume training from ckpt, \ -set load_ckpt_folder or use default value \ -(if is the default value, internlm will try to load the latest ckpt from save_ckpt_folder)" - ) - - if self.load_ckpt_folder: - if gpc.is_rank_for_log(): - logger.info( - f"===========Resume training from `{self.load_ckpt_folder}` {current_time} on host:" - f"{socket.gethostname()}===========" - ) - model_load_path = self.load_ckpt_folder - elif self.load_model_only_folder: - if gpc.is_rank_for_log(): - logger.info( - f"===========Load Model from `{self.load_model_only_folder}` {current_time} on host:" - f"{socket.gethostname()}===========" - ) - model_load_path = self.load_model_only_folder - else: + if self.load_ckpt_info is None or self.load_ckpt_info["path"] is None: if gpc.is_rank_for_log(): logger.info( f"===========New Run {current_time} on host:{socket.gethostname()},rank={gpc.get_global_rank()}," f"tp={gpc.get_local_rank(ParallelMode.TENSOR)},pp={gpc.get_local_rank(ParallelMode.PIPELINE)}," f"dp={gpc.get_local_rank(ParallelMode.DATA)}===========" ) + else: + load_path = self.load_ckpt_info["path"] + load_content = self.load_ckpt_info["content"] + load_type = self.load_ckpt_info["ckpt_type"] - # Loading model weights must be done before zero is initialized. - if model_load_path is not None: - load_model_checkpoint(folder=model_load_path, model=self.model) + load_func = CheckpointLoadMethod.get_ckpt_load_type_func(load_type) + load_content_str = load_func(self, self.load_ckpt_info, train_state) - def try_resume_training(self, lr_scheduler, optimizer, lr, train_state, train_dl): - """Attempt to restore the training state of the last ckpt. 
+ # If we only load model weight, we need rewrite zero optim's fp32 buffer. + if load_content.only_load(CheckpointLoadContent.MODEL) and isinstance(self.optimizer, HybridZeroOptimizer): + reload_zero_fp32_buff(self.optimizer) - Args: - lr_scheduler (_LRScheduler): lr_scheduler object. - optimizer (Optimizer): optimizer object. - lr (float): learning rate. - train_state (dict): traing states. - train_dl (DataLoader): traning dataloader object - """ - if self.load_ckpt_folder is not None: - # load optimzier states. - if self.load_optimizer: - load_optimizer_checkpoint(self.load_ckpt_folder, optimizer) - # load lr scheduler states. - load_scheduler(self.load_ckpt_folder, lr_scheduler, optimizer, lr, train_state) - # load training states. - load_context(self.load_ckpt_folder, train_dl, train_state) - # load dataloader sampler states. - if hasattr(train_state, "batch_sampler") and not isinstance( - train_state.batch_sampler, torch.utils.data.sampler.BatchSampler - ): - load_sampler(self.load_ckpt_folder, train_dl.batch_sampler) - if hasattr(train_state, "data_state_dict"): - train_dl.dataset.load_state_dict( - llm_load(os.path.join(self.load_ckpt_folder, "sampler_0.pt")), ckpt_path=self.load_ckpt_folder + if gpc.is_rank_for_log(): + logger.info(f"load_ckpt_info : {self.load_ckpt_info}") + logger.info( + f"===========Resume training from `{load_path}` {current_time} on host:" + f"{socket.gethostname()}===========" ) - self.optimizer = optimizer - self.lr_scheduler = lr_scheduler + if load_content_str: + logger.info(f"===========Load contents are: {load_content_str}") def save_checkpoint( self, @@ -600,8 +767,10 @@ set load_ckpt_folder or use default value \ ) if gpc.is_rank_for_log(): - scheduler_states = scheduler.state_dict() - llm_save(os.path.join(folder, "schedulder.pt"), saved_obj=scheduler_states) + if scheduler: + scheduler_states = scheduler.state_dict() + llm_save(os.path.join(folder, "schedulder.pt"), saved_obj=scheduler_states) + if hasattr(train_state, "batch_sampler") and not isinstance( train_state.batch_sampler, torch.utils.data.sampler.BatchSampler ): @@ -631,3 +800,12 @@ set load_ckpt_folder or use default value \ def set_save_folder(self, folder, step): self.storage_manager.latest_save_folder = folder self.storage_manager.latest_save_step = step + + def try_ping_storage(self): + if gpc.get_global_rank() % 8 == 0: + buff = torch.ones((1, 64, 64), dtype=torch.bfloat16) + test_fn = os.path.join(self.save_ckpt_folder, f"pings/{socket.gethostname()}.ping") + self.storage_manager.save(test_fn, buff) + self.storage_manager.wait() + self.storage_manager.load(test_fn) + del buff diff --git a/internlm/utils/storage_manager.py b/internlm/utils/storage_manager.py index c7b71f4..8f562e4 100644 --- a/internlm/utils/storage_manager.py +++ b/internlm/utils/storage_manager.py @@ -136,6 +136,22 @@ def compute_file_md5_by_chunk(file_name: str): return hash_md5.hexdigest() +def try_get_storage_backend(path: str): + sre = path.split(":", maxsplit=1) + if len(sre) == 1: + if path.startswith("s3:"): + backend = "boto3" + if gpc.is_rank_for_log(): + logger.warning(f"path: '{path}' not start with backend prefix, guess it is the backend of boto3.") + else: + backend = "local" + if gpc.is_rank_for_log(): + logger.warning(f"path: '{path}' not start with backend prefix, guess it is the backend of local.") + return backend, sre + else: + return sre[0], sre[1] # (backend_prefix, splited_path) + + class Boto3Client(StorageClient): """ Boto3Client @@ -231,21 +247,34 @@ class Boto3Client(StorageClient): def 
assert_fp_exists(handler, bucket_name: str, fp: str, local_nvme_path: str): # pylint: disable=W0613 assert len(list(handler.client.list_objects(Bucket=bucket_name, Prefix=fp)["Contents"])) > 0, fp + @staticmethod + def is_fp_exists(handler, bucket_name: str, fp: str, local_nvme_path: str): # pylint: disable=W0613 + re = handler.client.list_objects(Bucket=bucket_name, Prefix=fp) + if "Contents" in re: + return len(list(re["Contents"])) > 0 + else: + return False + @staticmethod def get_fns(handler, bucket_name: str, fp: str, local_nvme_path: str, *args, **kwargs): # pylint: disable=W0613 """ Ref: https://stackoverflow.com/questions/54314563/ how-to-get-more-than-1000-objects-from-s3-by-using-list-objects-v2 """ - paginator = handler.client.get_paginator("list_objects_v2") - pages = paginator.paginate(Bucket=bucket_name, Prefix=fp) - folder_name_list = [] - for page in pages: - if "Contents" in page: - for obj in page["Contents"]: - pth: str = obj["Key"] - folder_name_list.append(pth.split(fp, maxsplit=1)[1].strip("/").split("/", maxsplit=1)[0]) - return list(set(folder_name_list)) + if Boto3Client.is_fp_exists(handler, bucket_name, fp, None): + paginator = handler.client.get_paginator("list_objects_v2") + pages = paginator.paginate(Bucket=bucket_name, Prefix=fp) + folder_name_list = [] + for page in pages: + if "Contents" in page: + for obj in page["Contents"]: + pth: str = obj["Key"] + folder_name_list.append(pth.split(fp, maxsplit=1)[1].strip("/").split("/", maxsplit=1)[0]) + return list(set(folder_name_list)) + else: + if gpc.is_rank_for_log(): + logger.warning(f"'{fp}' not found!") + return None @staticmethod def async_upload_fileobj(handler, bucket_name: str, fp: str, local_nvme_path: str): @@ -297,9 +326,12 @@ class LocalClient(StorageClient): @staticmethod def get_fns(handler, folder): assert isinstance(handler, LocalClient) - assert os.path.exists(folder), f"folder '{folder}' not exists!" 
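Paths for both backends are resolved through the try_get_storage_backend helper introduced earlier in this diff, which turns a prefixed path into a (backend, path) pair; a couple of illustrative calls (bucket and folder names are placeholders):

from internlm.utils.storage_manager import try_get_storage_backend

assert try_get_storage_backend("boto3:s3://bucket/ckpts") == ("boto3", "s3://bucket/ckpts")
assert try_get_storage_backend("local:/mnt/ckpts") == ("local", "/mnt/ckpts")
# A path without a recognized backend prefix falls back to a guess and logs a warning.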
- fns = os.listdir(folder) - return fns + if not os.path.exists(folder): + if gpc.is_rank_for_log(): + logger.warning(f"'{folder}' not found!") + return None + else: + return os.listdir(folder) @staticmethod def delete_obj(handler, fp: str): @@ -436,10 +468,7 @@ class StorageManager(metaclass=SingletonMeta): Args: path (str): _description_ """ - try: - backend, path = path.split(":", maxsplit=1) - except Exception as exc: - raise AttributeError(f"Given path '{path}' is not startwith backend prefix:'local/boto3'") from exc + backend, path = try_get_storage_backend(path) init_args = (None,) if backend == "local": @@ -594,23 +623,24 @@ class StorageManager(metaclass=SingletonMeta): if gpc.is_rank_for_log(): self.upload_count += 1 - if self.async_mode: + if self.async_mode and self.latest_save_folder: self.save( os.path.join(self.latest_save_folder, f"{self.latest_save_step}.step"), saved_obj=dict({"step": self.latest_save_step}), async_upload=False, ) + self.latest_save_folder = None storage_manager: StorageManager = None -def init_storage_manager(ckpt_config): +def init_storage_manager(enable_save_ckpt, async_upload_tmp_folder, async_upload): global storage_manager storage_manager = StorageManager( - ckpt_config.enable_save_ckpt, - tmp_local_folder=ckpt_config.async_upload_tmp_folder, - async_mode=ckpt_config.async_upload, + enable_save_ckpt, + tmp_local_folder=async_upload_tmp_folder, + async_mode=async_upload, ) diff --git a/tests/test_utils/common_fixture.py b/tests/test_utils/common_fixture.py new file mode 100644 index 0000000..83ea8e2 --- /dev/null +++ b/tests/test_utils/common_fixture.py @@ -0,0 +1,143 @@ +import os + +import pytest +import torch + +from internlm.core.context import global_context as gpc +from internlm.core.context.parallel_context import Config +from internlm.solver.optimizer.hybrid_zero_optim import HybridZeroOptimizer +from internlm.utils.common import SingletonMeta + +# 1B +init_config = Config( + dict( + parallel=dict(zero1=1, pipeline=dict(size=1, interleaved_overlap=False), sequence_parallel=False, tensor=1), + model_type="INTERNLM", + adam=dict( + lr=1e-4, + ), + data=dict(seq_len=2048, micro_num=1, micro_bsz=1, pack_sample_into_one=False, min_length=0, total_steps=9999), + model=dict( + checkpoint=False, + num_attention_heads=2, + embed_split_hidden=True, + vocab_size=103168, + embed_grad_scale=1, + parallel_output=True, + hidden_size=1024, + num_layers=2, + mlp_ratio=1, + apply_post_layer_norm=False, + dtype=torch.bfloat16, + norm_type="rmsnorm", + layer_norm_epsilon=1e-5, + use_flash_attn=True, + num_chunks=1, + ), + resume_tb_folder="", + tensorboard_folder="", + ) +) + + +def init_naive_model(): + # let MODEL_INITIALIZER to work + import internlm.model.modeling_internlm # noqa # pylint: disable=unused-import + from internlm.core.naive_amp import NaiveAMPModel + from internlm.utils.registry import MODEL_INITIALIZER + + model = MODEL_INITIALIZER.get_module(module_name=gpc.config.model_type)(**(init_config.model)) + model = NaiveAMPModel( + model=model, + output_to_fp32=False, + dtype=torch.bfloat16, + sync_buffer=False, + ) + return model + + +def init_naive_optim(model): + naive_optimizer = torch.optim.AdamW( + params=[{"params": model.parameters(), "weight_decay": 0.01}], + lr=1e-4, + betas=(0.9, 0.95), + eps=1e-8, + ) + return naive_optimizer + + +def init_hybrid_optim(model): + naive_optimizer = torch.optim.AdamW( + params=[{"params": model.parameters(), "weight_decay": 0.01}], + lr=1e-4, + betas=(0.9, 0.95), + eps=1e-8, + ) + optimizer = 
HybridZeroOptimizer( + naive_optimizer, + grad_scal_cfg=Config( + dict( + fp16=dict( + initial_scale=2**16, + min_scale=1, + growth_interval=1000, + ), + growth_factor=2, + backoff_factor=0.5, + max_scale=2**24, + hysteresis=2, + ) + ), + zero_cfg=Config( + dict( + overlap_sync_grad=False, + overlap_sync_param=False, + reduce_bucket_size=512 * 1024 * 1024, + clip_grad_norm=1.0, + ) + ), + param_bcast_sync_handler=None, + ) + return optimizer + + +@pytest.fixture(autouse=True, scope="function") +def reset_singletons(): + SingletonMeta._instances = {} + + +def reset_seed(): + from internlm.core.context.random import _SEED_MANAGER + _SEED_MANAGER.reset() + +@pytest.fixture(scope="module") +def init_dist_and_model(): + from internlm.initialize import initialize_distributed_env + + os.environ["RANK"] = "0" + os.environ["LOCAL_RANK"] = "0" + os.environ["WORLD_SIZE"] = "1" + os.environ["MASTER_ADDR"] = "127.0.0.1" + os.environ["MASTER_PORT"] = "12377" + initialize_distributed_env(config=init_config, launcher="torch", master_port=12377, args_check=False) + + # setup + print("set up", flush=True) + model = init_naive_model() + # opim = init_naive_optim(model) + opim = init_hybrid_optim(model) + + yield model, opim + + # teardown + del model, opim + print("teardown", flush=True) + gpc.destroy() + reset_seed() + + + +def enter_flag(text): + print(f"{text} begin!", flush=True) + yield + print(f"{text} end!", flush=True) diff --git a/tests/test_utils/test_model_checkpoint.py b/tests/test_utils/test_model_checkpoint.py new file mode 100644 index 0000000..0a93dba --- /dev/null +++ b/tests/test_utils/test_model_checkpoint.py @@ -0,0 +1,278 @@ +import os +import shutil +from subprocess import PIPE, STDOUT, Popen + +import pytest +import torch + +from internlm.core.context import global_context as gpc +from internlm.core.context.parallel_context import Config +from internlm.core.trainer import TrainState +from internlm.solver.optimizer.hybrid_zero_optim import HybridZeroOptimizer +from internlm.utils.common import SingletonMeta +from internlm.utils.model_checkpoint import CheckpointManager +from internlm.utils.storage_manager import wait_async_upload_finish +from tests.test_utils.common_fixture import ( # noqa # pylint: disable=unused-import + init_dist_and_model, + reset_singletons, +) + +TOTAL_STEP = 6 + +CKPT_EVERY = 4 +SNPASHOT_EVERY = 2 +OSS_NAME = os.environ["OSS_BUCKET_NAME"] +OSS_IP = os.environ["OSS_IP"] +USER = os.environ["USER"] +JOB_NAME = "CI_TEST" +LOCAL_SAVE_PATH = "local:local_ckpt" + +BOTO_SAVE_PATH = f"boto3:s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}" +BOTO_SAVE_PATH_NO_PRFIX = f"s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}/" + +ASYNC_TMP_FOLDER = "./async_tmp_folder" + + +def del_tmp_file(): + try: + shutil.rmtree(ASYNC_TMP_FOLDER, ignore_errors=True) + except FileNotFoundError: + pass + + try: + shutil.rmtree(LOCAL_SAVE_PATH.split(":")[1], ignore_errors=True) + except FileNotFoundError: + pass + + try: + cmd = r"/mnt/petrelfs/share/sensesync --dryrun --deleteSrc cp " + BOTO_SAVE_PATH_NO_PRFIX + " / " + with Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) as output: + results, presults = "", "" + for line in iter(output.stdout.readline, b""): + results += str(line.rstrip()) + presults += line.rstrip().decode() + "\n" + print(presults, flush=True) + except FileNotFoundError: + pass + + +ckpt_config_list = [ + # Old interface format + dict( + enable_save_ckpt=True, + save_ckpt_folder=BOTO_SAVE_PATH, + load_optimizer=True, + checkpoint_every=CKPT_EVERY, + async_upload=True, + 
async_upload_tmp_folder=ASYNC_TMP_FOLDER, + snapshot_ckpt_folder="/".join([BOTO_SAVE_PATH, "snapshot"]), + oss_snapshot_freq=SNPASHOT_EVERY, + stop_file_path=None, + load_model_only_folder=None, + load_given_ckpt=False, + load_ckpt_folder=None, + is_old_api=True, + ), + # Old interface format + dict( + enable_save_ckpt=True, + save_ckpt_folder=LOCAL_SAVE_PATH, + load_optimizer=True, + checkpoint_every=CKPT_EVERY, + async_upload=False, + async_upload_tmp_folder=ASYNC_TMP_FOLDER, + snapshot_ckpt_folder="/".join([LOCAL_SAVE_PATH, "snapshot"]), + oss_snapshot_freq=SNPASHOT_EVERY, + stop_file_path=None, + load_model_only_folder=None, + load_given_ckpt=False, + load_ckpt_folder=None, + is_old_api=True, + ), + # New interface format + dict( + enable_save_ckpt=True, + save_ckpt_folder=BOTO_SAVE_PATH, + checkpoint_every=CKPT_EVERY, + async_upload=True, + async_upload_tmp_folder=ASYNC_TMP_FOLDER, + oss_snapshot_freq=SNPASHOT_EVERY, + stop_file_path=None, + is_old_api=False, + auto_resume=True, + ), + dict( + enable_save_ckpt=True, + save_ckpt_folder=LOCAL_SAVE_PATH, + checkpoint_every=CKPT_EVERY, + async_upload=False, + async_upload_tmp_folder=ASYNC_TMP_FOLDER, + oss_snapshot_freq=SNPASHOT_EVERY, + stop_file_path=None, + load_ckpt_folder=None, + is_old_api=False, + auto_resume=True, + ), +] + + +def overwrite_optim_state(optim, set_value): + if isinstance(optim, HybridZeroOptimizer): + for group_id, p in optim._fp32_flat_param_groups_of_current_rank.items(): + if optim._zero_local_rank not in optim.param_group_no_params_ranks[group_id]: + # p.copy_(torch.full_like(p, set_value, dtype=p.dtype)) + p.data.fill_(set_value) + for group_id in range(len(optim._fp16_param_groups)): + if optim._zero_local_rank not in optim.param_group_no_params_ranks[group_id]: + fp16_p = optim._param_store.get_flat_fp16_param_by_rank_group( + rank=optim._zero_local_rank, group_id=group_id + ) + fp16_p.fill_(set_value) + else: + for group in optim.param_groups: + for p in group["params"]: + # p.copy_(torch.full_like(p, set_value, dtype=p.dtype)) + p.data.fill_(set_value) + + +def compare_optim_state(optim1, optim2): + re = True + if isinstance(optim1, HybridZeroOptimizer): + fp32_buff1 = optim1._fp32_flat_param_groups_of_current_rank + fp32_buff2 = optim2._fp32_flat_param_groups_of_current_rank + for group_id_1, group_id_2 in zip(fp32_buff1, fp32_buff2): + re &= group_id_1 == group_id_2 + if optim1.zero_local_rank not in optim1.param_group_no_params_ranks[group_id_1]: + re &= torch.equal(fp32_buff1[group_id_1], fp32_buff1[group_id_2]) + else: + for group1, group2 in zip(optim1.param_groups, optim2.param_groups): + for p1, p2 in zip(group1["params"], group2["params"]): + re &= torch.equal(p1, p2) + return re + + +def compare_optim_value(optim, value): + re = True + if isinstance(optim, HybridZeroOptimizer): + for group_id, p in optim._fp32_flat_param_groups_of_current_rank.items(): + if optim._zero_local_rank not in optim.param_group_no_params_ranks[group_id]: + re &= torch.equal(p, torch.full_like(p, value, dtype=p.dtype)) + for group_id in range(len(optim._fp16_param_groups)): + if optim._zero_local_rank not in optim.param_group_no_params_ranks[group_id]: + fp16_p = optim._param_store.get_flat_fp16_param_by_rank_group( + rank=optim._zero_local_rank, group_id=group_id + ) + re &= torch.equal(fp16_p, torch.full_like(fp16_p, value, dtype=fp16_p.dtype)) + else: + for group in optim.param_groups: + for p in group["params"]: + re &= torch.equal(p, torch.full_like(p, value, dtype=p.dtype)) + return re + + +def 
overwrite_model_value(model, value): + for p in model.parameters(): + # p.copy_(torch.full_like(p, value, dtype=p.dtype)) + p.data.fill_(value) + + +def compare_model_value(model, value): + re = True + for p in model.parameters(): + re &= torch.equal(p, torch.full_like(p, value, dtype=p.dtype)) + return re + + +@pytest.fixture(scope="function") +def del_tmp(): + del_tmp_file() + yield + del_tmp_file() + + +@pytest.mark.usefixtures("del_tmp") +@pytest.mark.usefixtures("reset_singletons") +@pytest.mark.parametrize("ckpt_config", ckpt_config_list) +def test_ckpt_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-import + from internlm.utils.model_checkpoint import CheckpointLoadMask, CheckpointLoadType + + ckpt_config = Config(ckpt_config) + assert ckpt_config.checkpoint_every < TOTAL_STEP + assert ckpt_config.oss_snapshot_freq < TOTAL_STEP + + model, opim = init_dist_and_model + train_state = TrainState(gpc.config, None) + if isinstance(opim, HybridZeroOptimizer): + print("Is HybridZeroOptimizer!", flush=True) + else: + print("Is naive Adam!", flush=True) + + ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim) + latest_ckpt_step = None + for i in range(TOTAL_STEP + 1): + overwrite_model_value(model, i) + overwrite_optim_state(opim, i) + + train_state.batch_count = i + train_state.step_count += 1 + + save_ckpts, _, _ = ckpt_mm.is_now_to_save_ckpt(train_state) + if save_ckpts: + latest_ckpt_step = i + + ckpt_mm.try_save_checkpoint(train_state) + + wait_async_upload_finish() + latest_ckpt_info = ckpt_mm.query_lastest_ckpt() + assert latest_ckpt_info is not None + latest_ckpt = latest_ckpt_info["path"] + if ckpt_mm.save_ckpt_folder.startswith("local"): + assert latest_ckpt == "local:local_ckpt/snapshot/0", latest_ckpt + else: + assert latest_ckpt == f"{BOTO_SAVE_PATH}/snapshot/0", latest_ckpt + + del ckpt_mm + SingletonMeta._instances = {} + ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim) + ckpt_mm.try_resume_training(train_state) + assert latest_ckpt_step == 5 + assert train_state.step_count == 6 + assert train_state.batch_count == 6 + assert compare_optim_value(ckpt_mm.optimizer, latest_ckpt_step), ckpt_mm.optimizer.param_groups[0]["params"][0] + assert compare_model_value(ckpt_mm.model, latest_ckpt_step), list(ckpt_mm.model.parameters())[0][0] + + if ckpt_mm.save_ckpt_folder.startswith("local:"): + ckpt_mm.load_ckpt_info = dict( + path=os.path.join(LOCAL_SAVE_PATH, "4"), + content=CheckpointLoadMask(("all",)), + ckpt_type=CheckpointLoadType.INTERNLM, + ) + else: + ckpt_mm.load_ckpt_info = dict( + path=os.path.join(BOTO_SAVE_PATH, "4"), + content=CheckpointLoadMask(("all",)), + ckpt_type=CheckpointLoadType.INTERNLM, + ) + + ckpt_mm.try_resume_training(train_state) + + assert train_state.step_count == 4 + assert train_state.batch_count == 4 + assert compare_optim_value(ckpt_mm.optimizer, 3), ckpt_mm.optimizer.param_groups[0]["params"][0] + assert compare_model_value(ckpt_mm.model, 3), list(ckpt_mm.model.parameters())[0][0] + + +@pytest.mark.usefixtures("del_tmp") +@pytest.mark.usefixtures("reset_singletons") +@pytest.mark.parametrize("ckpt_config", ckpt_config_list) +def test_ckpt_mm_ping(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-import + ckpt_config = Config(ckpt_config) + + model, opim = init_dist_and_model + SingletonMeta._instances = {} + ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim) + ckpt_mm.try_ping_storage() + + +if __name__ == "__main__": + pytest.main() diff --git 
a/tests/test_utils/test_storage_manager.py b/tests/test_utils/test_storage_manager.py new file mode 100644 index 0000000..eef4152 --- /dev/null +++ b/tests/test_utils/test_storage_manager.py @@ -0,0 +1,26 @@ +import pytest + +from internlm.core.context.parallel_context import Config +from internlm.initialize.launch import get_config_value +from tests.test_utils.common_fixture import ( # noqa # pylint: disable=unused-import + BOTO_SAVE_PATH, + TOTAL_STEP, + ckpt_config_list, + del_tmp_file, + init_dist_and_model, + reset_singletons, +) + + +@pytest.mark.usefixtures("reset_singletons") +@pytest.mark.parametrize("ckpt_config", ckpt_config_list) +def test_storage_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-argument + from internlm.utils.storage_manager import get_storage_manager, init_storage_manager + + ckpt_config = Config(ckpt_config) + enable_save_ckpt = get_config_value(ckpt_config, "enable_save_ckpt", False) + async_upload_tmp_folder = get_config_value(ckpt_config, "async_upload_tmp_folder", False) + async_upload = get_config_value(ckpt_config, "async_upload", False) + + init_storage_manager(enable_save_ckpt, async_upload_tmp_folder, async_upload) + get_storage_manager() diff --git a/train.py b/train.py index 69cdd3c..dbdc09d 100644 --- a/train.py +++ b/train.py @@ -35,7 +35,6 @@ from internlm.utils.common import ( parse_args, ) from internlm.utils.evaluation import evaluate_on_val_dls -from internlm.utils.gputest import bench_gpu, bench_net from internlm.utils.logger import get_logger, initialize_uniscale_logger from internlm.utils.megatron_timers import megatron_timer as timer from internlm.utils.model_checkpoint import CheckpointManager @@ -73,7 +72,6 @@ def main(args): total_steps = gpc.config.data.total_steps valid_every = gpc.config.data.valid_every label_smoothing = gpc.config.loss.label_smoothing - lr = gpc.config.adam.lr get_tflops_func = partial( get_megatron_flops, @@ -96,21 +94,11 @@ def main(args): # initialize customed llm logger uniscale_logger = initialize_llm_logger(start_time=current_time) - # initialize and resume train state - train_state = TrainState(gpc.config) - # initialize model model = initialize_model() with open(args.config, "r") as f: config_lines = f.readlines() - ckpt_manager = CheckpointManager( - ckpt_config=gpc.config.ckpt, - model=model, - model_config=gpc.config.model, - model_config_file="".join(config_lines), - feishu_address=gpc.config.alert_address, - ) # initialize loss function criterion = FlashGPTLMLoss(parallel_output=True, label_smoothing=label_smoothing) @@ -118,15 +106,25 @@ def main(args): # initialize the train and validation data loader train_dl, dataset_types = get_train_data_loader(num_worker=4) val_dls = get_validation_data_loader() - train_state.init_batch_sampler(train_dl) - # Loading model weights must be done before zero is initialized. - ckpt_manager.try_load_model(current_time) + # initialize and resume train state + train_state = TrainState(gpc.config, train_dl.batch_sampler) optimizer, beta2_scheduler, lr_scheduler = initialize_optimizer(model=model) + ckpt_manager = CheckpointManager( + ckpt_config=gpc.config.ckpt, + model=model, + optimizer=optimizer, + lr_scheduler=lr_scheduler, + train_dl=train_dl, + model_config=gpc.config.model, + model_config_file="".join(config_lines), + feishu_address=gpc.config.alert_address, + ) + # Loading other persistent training states. 
- ckpt_manager.try_resume_training(lr_scheduler, optimizer, lr, train_state, train_dl) + ckpt_manager.try_resume_training(train_state, current_time) # initialize customed llm writer writer = Writer( @@ -197,8 +195,6 @@ def main(args): for batch_count in range(train_state.batch_count, total_steps): if batch_count % 50 == 0: torch.cuda.empty_cache() - bench_gpu() - bench_net() start_time = time.time() timer("one-batch").start() From 7f61505fa014d909c3382576cef808839ba080be Mon Sep 17 00:00:00 2001 From: Sun Peng Date: Tue, 5 Sep 2023 17:47:50 +0800 Subject: [PATCH 19/34] fix/broadcast should not in commu stream (#276) * fix/brocast should not in commu stream * fix/brocast should not in commu stream --------- Co-authored-by: yingtongxiong <974106207@qq.com> --- internlm/solver/optimizer/hybrid_zero_optim.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 63d2bfa..700d0dc 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -164,9 +164,6 @@ class HybridZeroOptimizer(BaseOptimizer): self._param_bcast_sync_handler = param_bcast_sync_handler if self._overlap_sync_param: assert self._param_bcast_sync_handler is not None - self._broadcast_comm_stream = torch.cuda.Stream() - else: - self._broadcast_comm_stream = torch.cuda.current_stream() # iterate over the param group in the optimizer # partition these param groups for data parallel training @@ -648,8 +645,7 @@ class HybridZeroOptimizer(BaseOptimizer): fp32_param = self._fp32_flat_param_groups_of_current_rank[group_id] fp16_param.data.copy_(fp32_param) - with torch.cuda.stream(self._broadcast_comm_stream): - self.broadcast_params() + self.broadcast_params() timer("step").stop() From 9445faf5bef62304a54b6edd5dd7c9d737e466b6 Mon Sep 17 00:00:00 2001 From: ytxiong <45058324+yingtongxiong@users.noreply.github.com> Date: Tue, 5 Sep 2023 19:03:02 +0800 Subject: [PATCH 20/34] fix(model): set tensor parallel attribute for mlp (#271) * set is_tensor_parallel attribute for mlp * fix lint --- internlm/model/linear.py | 8 +------- internlm/model/modeling_internlm.py | 3 +++ 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/internlm/model/linear.py b/internlm/model/linear.py index 32f29f8..5a3a4eb 100644 --- a/internlm/model/linear.py +++ b/internlm/model/linear.py @@ -9,7 +9,7 @@ from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear from flash_attn.utils.distributed import all_reduce, reduce_scatter from torch import nn -from internlm.core.context import IS_TENSOR_PARALLEL, ParallelMode +from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc from internlm.model.utils import fused_dense_func_torch @@ -195,12 +195,6 @@ class FeedForward(nn.Module): device=device, dtype=dtype, ) - # need to assign tp attribute so that colossalai know it is tensor parallel module - - if gpc.get_world_size(ParallelMode.TENSOR) > 1: - for name in ["w1", "w2", "w3"]: - for param in getattr(self, name).parameters(): - setattr(param, IS_TENSOR_PARALLEL, True) def forward(self, x): out = self.w3(F.silu(self.w1(x)) * self.w2(x)) diff --git a/internlm/model/modeling_internlm.py b/internlm/model/modeling_internlm.py index 4494959..0ca805e 100644 --- a/internlm/model/modeling_internlm.py +++ b/internlm/model/modeling_internlm.py @@ -127,6 +127,9 @@ class PackedFlashBaseLayer1D(nn.Module): device=device, dtype=dtype, ) + 
for _, param in self.mlp.named_parameters(): + if gpc.get_world_size(ParallelMode.TENSOR) > 1: + setattr(param, IS_TENSOR_PARALLEL, True) self.dropout2 = nn.Dropout(drop_rate) self.use_swiglu = use_swiglu self.use_scaled_init = use_scaled_init From 8d8d811e107ee3223a2ab6ea09d7e80bf5e67ee8 Mon Sep 17 00:00:00 2001 From: jiaopenglong <44927264+JiaoPL@users.noreply.github.com> Date: Tue, 5 Sep 2023 19:24:01 +0800 Subject: [PATCH 21/34] feat(monitor): add light monitor (#275) * add light monitor * filter key of metrics dict * test no light_monitor case * mv init_light_monitor to initialize_distributed_env --- internlm/initialize/launch.py | 9 +++++ internlm/monitor/__init__.py | 9 ++++- internlm/monitor/alert.py | 51 +++++++++++++++++++++++++++++ internlm/train/training_internlm.py | 5 ++- 4 files changed, 72 insertions(+), 2 deletions(-) diff --git a/internlm/initialize/launch.py b/internlm/initialize/launch.py index b446934..bd45183 100644 --- a/internlm/initialize/launch.py +++ b/internlm/initialize/launch.py @@ -10,6 +10,7 @@ import torch from internlm.core.context import Config from internlm.core.context import global_context as gpc +from internlm.monitor import initialize_light_monitor from internlm.utils.common import get_master_node from internlm.utils.logger import get_logger @@ -332,6 +333,14 @@ def launch( f"tensor parallel size: {gpc.tensor_parallel_size}", ) + # init light monitor client + light_monitor_address = gpc.config.get("light_monitor_address", None) + if light_monitor_address is None: + if gpc.is_rank_for_log(): + logger.warning("monitor address is none, monitor could not be used!") + else: + initialize_light_monitor(light_monitor_address) + def launch_from_slurm( config: Union[str, Path, Config, Dict], diff --git a/internlm/monitor/__init__.py b/internlm/monitor/__init__.py index b100cde..2501d66 100644 --- a/internlm/monitor/__init__.py +++ b/internlm/monitor/__init__.py @@ -1,4 +1,11 @@ +from .alert import initialize_light_monitor, send_heartbeat from .monitor import initialize_monitor_manager, send_alert_message from .utils import set_env_var -__all__ = ["send_alert_message", "initialize_monitor_manager", "set_env_var"] +__all__ = [ + "send_alert_message", + "initialize_monitor_manager", + "set_env_var", + "initialize_light_monitor", + "send_heartbeat", +] diff --git a/internlm/monitor/alert.py b/internlm/monitor/alert.py index 78b6040..1772e7f 100644 --- a/internlm/monitor/alert.py +++ b/internlm/monitor/alert.py @@ -1,8 +1,59 @@ import json +import math +import os +import re import time +from typing import Dict import requests +from internlm.utils.logger import get_logger + +logger = get_logger(__file__) + + +def initialize_light_monitor(monitor_address: str = None): + try: + from uniscale_monitoring import init_monitor + + init_monitor(monitor_address) + except Exception as e: + logger.warning(f"init monitor meet error: {e}") + + +def send_heartbeat(msg_type: str, msg: Dict): + def nan2none(v): + if isinstance(v, float) and math.isnan(v): + return None + return v + + try: + from uniscale_monitoring import send_meta + + data = {} + for k, v in msg.items(): + if isinstance(v, Dict): + for k1, v1 in v.items(): + new_k = f"{k}_{k1}".split(" ")[0] + new_k = re.sub(r"[^a-zA-Z0-9_]", "_", new_k) + data[new_k] = nan2none(v1) + else: + new_k = k.split(" ")[0] + new_k = re.sub(r"[^a-zA-Z0-9_]", "_", new_k) + data[new_k] = nan2none(v) + + if os.getenv("CLUSTER_NAME"): + data.update({"cluster": os.getenv("CLUSTER_NAME")}) + if msg_type == "train_metrics": + 
data.update({"msg_type": "train_metrics"}) + elif msg_type == "init_time": + data.update({"msg_type": "init_time"}) + elif msg_type == "stage_time": + data.update({"msg_type": "stage_time"}) + send_meta(data, timeout=0.1) + except Exception as e: + logger.warning(f"send heartbeat meet error: {e}") + def send_feishu_msg_with_webhook(webhook: str, title: str, message: str): """ diff --git a/internlm/train/training_internlm.py b/internlm/train/training_internlm.py index 9c2ded0..a42758a 100644 --- a/internlm/train/training_internlm.py +++ b/internlm/train/training_internlm.py @@ -24,7 +24,7 @@ from internlm.data.packed_dataset import ( get_packed_dataset_without_short_length, ) from internlm.data.utils import DATASET_TYPE_IDS_MAP, unpack_data -from internlm.monitor import set_env_var +from internlm.monitor import send_heartbeat, set_env_var from internlm.monitor.monitor import monitor_manager as mm from internlm.solver.beta2_scheduler import Beta2Scheduler from internlm.solver.lr_scheduler import FineTuneCosineAnnealingWarmupLR @@ -394,6 +394,9 @@ def record_current_batch_training_metrics( else: writer.add_scalar(key=key, value=value, step=train_state.step_count) + if gpc.config.get("light_monitor_address", None) and batch_count % 50 == 0: + send_heartbeat("train_metrics", infos) + if update_panel: # metrics shown with dashboard panels panel_metrics = { From 8acf823a04802910bfdba667bcfc2ede166a244c Mon Sep 17 00:00:00 2001 From: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Date: Wed, 6 Sep 2023 01:15:09 +0800 Subject: [PATCH 22/34] fix(storage): fix and refactor storage api (#281) --- internlm/utils/storage_manager.py | 165 ++++++++++++---------- tests/__init__.py | 0 tests/test_utils/common_fixture.py | 40 +++++- tests/test_utils/test_model_checkpoint.py | 39 +---- tests/test_utils/test_storage_manager.py | 73 +++++++++- 5 files changed, 205 insertions(+), 112 deletions(-) create mode 100644 tests/__init__.py diff --git a/internlm/utils/storage_manager.py b/internlm/utils/storage_manager.py index 8f562e4..36bd105 100644 --- a/internlm/utils/storage_manager.py +++ b/internlm/utils/storage_manager.py @@ -46,12 +46,12 @@ def get_fns(fp: str): return storage_manager.get_fns(fp) -def llm_load(fp: str, *args, **kwargs): - return storage_manager.load(fp, *args, **kwargs) +def llm_load(fp: str, **kwargs): + return storage_manager.load(fp, **kwargs) -def llm_save(save_path: str, saved_obj: Any, *args, **kwargs): - storage_manager.save(save_path, *args, saved_obj=saved_obj, **kwargs) +def llm_save(save_path: str, saved_obj: Any, **kwargs): + storage_manager.save(save_path, to_save_obj=saved_obj, **kwargs) class StorageClient: @@ -63,19 +63,23 @@ class StorageClient: self.handler = handler @staticmethod - def load(client, load_path: str, *args, **kwargs): + def load(*args, **kwargs): raise NotImplementedError @staticmethod - def sync_upload_fileobj(*args, saved_obj=None, **kwargs): + def sync_upload_fileobj(*args, **kwargs): raise NotImplementedError @staticmethod - def assert_fp_exists(client): + def async_upload_fileobj(*args, **kwargs): raise NotImplementedError @staticmethod - def get_fns(client): + def assert_fp_exists(*args, **kwargs): + raise NotImplementedError + + @staticmethod + def get_fns(*args, **kwargs): raise NotImplementedError @@ -92,40 +96,65 @@ class Boto3MetaInfo: async_upload_fn: callable, local_nvme_path=None, ) -> None: - self.is_async = is_async + # all need info. 
self.client = handler self.bucket_name = bucket_name - self.endpoint = endpoint self.file_path = file_path - self.async_upload_fn = async_upload_fn + # only save need info. self.local_nvme_path = local_nvme_path + self.is_async = is_async + self.endpoint = endpoint + self.async_upload_fn = async_upload_fn def __str__(self) -> str: return f"is_async: {self.is_async}, bucket_name:{self.bucket_name}, endpoint:{self.endpoint}, \ local_nvme_path: {self.local_nvme_path}" + @staticmethod + def unpack_boto3_save_meta(meta): + if meta.is_async: + return meta.client, meta.bucket_name, meta.file_path, meta.local_nvme_path + else: + return meta.client, meta.bucket_name, meta.file_path + + @staticmethod + def unpack_boto3_nosave_meta(meta): + return meta.client, meta.bucket_name, meta.file_path + class LocalMetaInfo: """Local meta info for save/load etc.""" - def __init__(self, handler: StorageClient, dest_path: str) -> None: - self.is_async = False - self.client = handler - self.dest_path = dest_path + def __init__(self, file_path: str) -> None: + self.file_path = file_path self.async_upload_fn = None + self.is_async = False + + @staticmethod + def unpack_local_save_meta(meta): + return (meta.file_path,) + + @staticmethod + def unpack_local_nosave_meta(meta): + return (meta.file_path,) -def unpack_meta(meta): - args = [] - is_async = meta.is_async - for k, v in meta.__dict__.items(): - if k in ("endpoint", "async_upload_fn", "is_async"): - continue - if not is_async and k in ("local_nvme_path",): - continue - args.append(v) +def unpack_save_meta(meta: Union[Boto3MetaInfo, LocalMetaInfo]): + if isinstance(meta, Boto3MetaInfo): + return Boto3MetaInfo.unpack_boto3_save_meta(meta) + elif isinstance(meta, LocalMetaInfo): + return LocalMetaInfo.unpack_local_save_meta(meta) + else: + raise ValueError(f"unkonwn meta info: {type(meta)}") - return args + +def unpack_nosave_meta(meta: Union[Boto3MetaInfo, LocalMetaInfo]): + if isinstance(meta, Boto3MetaInfo): + return Boto3MetaInfo.unpack_boto3_nosave_meta(meta) + elif isinstance(meta, LocalMetaInfo): + return LocalMetaInfo.unpack_local_nosave_meta(meta) + else: + raise ValueError(f"unkonwn meta info: {type(meta)}") def compute_file_md5_by_chunk(file_name: str): @@ -205,13 +234,11 @@ class Boto3Client(StorageClient): ) @staticmethod - def sync_upload_fileobj( - handler, bucket_name: str, fp: str, local_nvme_path: str, *args, saved_obj=None, **kwargs - ): # pylint: disable=W0613 + def sync_upload_fileobj(handler, bucket_name: str, fp: str, saved_obj=None, **kwargs): assert saved_obj is not None, "saved_obj is None!" try: with io.BytesIO() as f: - torch.save(saved_obj, f, *args, **kwargs) + torch.save(saved_obj, f, **kwargs) f.seek(0) handler.client.upload_fileobj(f, bucket_name, fp, Config=handler.config) except handler.botocore.exceptions.EndpointConnectionError as exc: @@ -220,14 +247,7 @@ class Boto3Client(StorageClient): ) from exc @staticmethod - def load( - handler, - bucket_name: str, - fp: str, - local_nvme_path: str, # pylint: disable=W0613 - *args, - **kwargs, - ) -> Dict: + def load(handler, bucket_name: str, fp: str, **kwargs) -> Dict: """ Args: fp (str): Path to save, eg. 
s3://opennlplab/model_weights/xxx/ddd.pt @@ -236,7 +256,7 @@ class Boto3Client(StorageClient): with io.BytesIO() as f: handler.client.download_fileobj(bucket_name, fp, f, Config=handler.config) f.seek(0) - states = torch.load(f, *args, **kwargs) + states = torch.load(f, **kwargs) except handler.botocore.exceptions.EndpointConnectionError as exc: raise RuntimeError( f"Boto3 Network Error: Please Check your Internet Connection in {socket.gethostname()}" @@ -244,11 +264,11 @@ class Boto3Client(StorageClient): return states @staticmethod - def assert_fp_exists(handler, bucket_name: str, fp: str, local_nvme_path: str): # pylint: disable=W0613 + def assert_fp_exists(handler, bucket_name: str, fp: str): # pylint: disable=W0613 assert len(list(handler.client.list_objects(Bucket=bucket_name, Prefix=fp)["Contents"])) > 0, fp @staticmethod - def is_fp_exists(handler, bucket_name: str, fp: str, local_nvme_path: str): # pylint: disable=W0613 + def is_fp_exists(handler, bucket_name: str, fp: str): # pylint: disable=W0613 re = handler.client.list_objects(Bucket=bucket_name, Prefix=fp) if "Contents" in re: return len(list(re["Contents"])) > 0 @@ -256,12 +276,12 @@ class Boto3Client(StorageClient): return False @staticmethod - def get_fns(handler, bucket_name: str, fp: str, local_nvme_path: str, *args, **kwargs): # pylint: disable=W0613 + def get_fns(handler, bucket_name: str, fp: str): """ Ref: https://stackoverflow.com/questions/54314563/ how-to-get-more-than-1000-objects-from-s3-by-using-list-objects-v2 """ - if Boto3Client.is_fp_exists(handler, bucket_name, fp, None): + if Boto3Client.is_fp_exists(handler, bucket_name, fp): paginator = handler.client.get_paginator("list_objects_v2") pages = paginator.paginate(Bucket=bucket_name, Prefix=fp) folder_name_list = [] @@ -302,30 +322,26 @@ class LocalClient(StorageClient): super().__init__(None) @staticmethod - def sync_upload_fileobj(handler, fp: str, *args, saved_obj=None, **kwargs): - assert isinstance(handler, LocalClient) + def sync_upload_fileobj(fp: str, saved_obj=None, **kwargs): assert saved_obj is not None fp_dirname = os.path.dirname(fp) if not os.path.exists(fp_dirname): os.makedirs(fp_dirname, exist_ok=True) - torch.save(saved_obj, fp, *args, **kwargs) + torch.save(saved_obj, fp, **kwargs) @staticmethod - def load(handler, fp: str, *args, **kwargs): # pylint: disable=W0613 - assert isinstance(handler, LocalClient) - assert os.path.exists(fp), f"{fp} is not found!" - with open(fp, "rb") as f: - states = torch.load(f, *args, **kwargs) + def load(load_path: str, **kwargs): + assert os.path.exists(load_path), f"{load_path} is not found!" 
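These per-backend clients are now fed by the explicit unpack_save_meta / unpack_nosave_meta helpers added above instead of the old generic unpack_meta; a small sketch of what they return for a local path (the path is a placeholder):

from internlm.utils.storage_manager import (
    LocalMetaInfo,
    unpack_nosave_meta,
    unpack_save_meta,
)

meta = LocalMetaInfo("/mnt/ckpts/1000/model_tp0_pp0.pt")
assert unpack_save_meta(meta) == ("/mnt/ckpts/1000/model_tp0_pp0.pt",)
assert unpack_nosave_meta(meta) == ("/mnt/ckpts/1000/model_tp0_pp0.pt",)
# A Boto3MetaInfo additionally carries the client, the bucket name and, for async
# saves, the temporary local NVMe path.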
+ with open(load_path, "rb") as f: + states = torch.load(f, **kwargs) return states @staticmethod - def assert_fp_exists(handler, folder): - assert isinstance(handler, LocalClient) + def assert_fp_exists(folder): assert os.path.exists(folder), folder @staticmethod - def get_fns(handler, folder): - assert isinstance(handler, LocalClient) + def get_fns(folder): if not os.path.exists(folder): if gpc.is_rank_for_log(): logger.warning(f"'{folder}' not found!") @@ -334,8 +350,7 @@ class LocalClient(StorageClient): return os.listdir(folder) @staticmethod - def delete_obj(handler, fp: str): - assert isinstance(handler, LocalClient) + def delete_obj(fp: str): if not os.path.isdir(fp): os.remove(fp) @@ -359,7 +374,10 @@ def get_boto3_meta(fp: str, tmp_local_folder: str, is_async: bool) -> Boto3MetaI assert match is not None, f"url '{fp}' is not a valid boto3 url" bucket_name, endpoint = match.group(1), match.group(2) endpoint = "http://" + endpoint + ":80" - tmp_step_file = get_tmp_file_name(tmp_local_folder, fp) + if is_async: + tmp_step_file = get_tmp_file_name(tmp_local_folder, fp) + else: + tmp_step_file = None return Boto3MetaInfo( is_async=is_async, handler=None, @@ -373,7 +391,7 @@ def get_boto3_meta(fp: str, tmp_local_folder: str, is_async: bool) -> Boto3MetaI def get_local_meta(fp: str) -> LocalMetaInfo: assert not fp.startswith("s3://"), f"Path '{fp}' is not a local path" - return LocalMetaInfo(None, fp) + return LocalMetaInfo(fp) def get_mount_point_free_size(path: str): @@ -459,7 +477,7 @@ class StorageManager(metaclass=SingletonMeta): logger.error(f'tmp_local_folder only have "{free_size}" GB free space, less then 100 GB!') raise RuntimeError(f"Insufficient temporary storage space on {socket.gethostname()}") - def _get_client(self, path=str) -> Union[Boto3MetaInfo, LocalMetaInfo]: + def _get_client(self, path: str, async_mode: bool = False) -> Union[Boto3MetaInfo, LocalMetaInfo]: """ example: local:/path/to/checkpoint @@ -475,7 +493,7 @@ class StorageManager(metaclass=SingletonMeta): meta_info = get_local_meta(path) backend_key = backend elif backend == "boto3": - meta_info = get_boto3_meta(path, self.tmp_local_folder, self.async_mode) + meta_info = get_boto3_meta(path, self.tmp_local_folder, async_mode) backend_key = backend + ":" + meta_info.endpoint init_args = (meta_info.endpoint,) if ( @@ -503,17 +521,22 @@ class StorageManager(metaclass=SingletonMeta): def assert_fp_exists(self, folder) -> None: meta = self._get_client(path=folder) - meta.client.assert_fp_exists(*unpack_meta(meta)) + meta.client.assert_fp_exists(*unpack_nosave_meta(meta)) def get_fns(self, folder) -> List[str]: meta = self._get_client(path=folder) - return meta.client.get_fns(*unpack_meta(meta)) + return meta.client.get_fns(*unpack_nosave_meta(meta)) - def save(self, save_path: str, saved_obj: Any, *args, async_upload=None, **kwargs): - meta = self._get_client(path=save_path) + def save(self, save_path: str, to_save_obj: Any, async_upload=None, **kwargs): if async_upload is None: async_upload = self.async_mode + + if not save_path.startswith("boto3:"): + async_upload = False + + meta = self._get_client(save_path, async_upload) + if async_upload: assert ( self.tmp_local_folder @@ -521,22 +544,22 @@ class StorageManager(metaclass=SingletonMeta): tmp_step_file = meta.local_nvme_path self._to_be_del_files.append(tmp_step_file) with open(tmp_step_file, "wb") as f: - torch.save(saved_obj, f, pickle_protocol=pickle.HIGHEST_PROTOCOL) - self.async_executor(meta.async_upload_fn, *unpack_meta(meta)) + torch.save(to_save_obj, 
f, pickle_protocol=pickle.HIGHEST_PROTOCOL) + self.async_executor(meta.async_upload_fn, *unpack_save_meta(meta)) os.chmod(tmp_step_file, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) self.async_task_peeding = True else: - meta.client.sync_upload_fileobj(*unpack_meta(meta), *args, saved_obj=saved_obj, **kwargs) + meta.client.sync_upload_fileobj(*unpack_save_meta(meta), saved_obj=to_save_obj, **kwargs) self.upload_count += 1 - def load(self, load_path: str, *args, **kwargs) -> Any: + def load(self, load_path: str, **kwargs) -> Any: self.wait() meta = self._get_client(path=load_path) - return meta.client.load(*unpack_meta(meta), *args, **kwargs) + return meta.client.load(*unpack_nosave_meta(meta), **kwargs) def delete_obj(self, fp: str): meta = self._get_client(path=fp) - meta.client.delete_obj(*unpack_meta(meta)) + meta.client.delete_obj(*unpack_nosave_meta(meta)) def _del_tmp_folder(self): for fp in self._to_be_del_files: @@ -626,7 +649,7 @@ class StorageManager(metaclass=SingletonMeta): if self.async_mode and self.latest_save_folder: self.save( os.path.join(self.latest_save_folder, f"{self.latest_save_step}.step"), - saved_obj=dict({"step": self.latest_save_step}), + to_save_obj=dict({"step": self.latest_save_step}), async_upload=False, ) self.latest_save_folder = None diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_utils/common_fixture.py b/tests/test_utils/common_fixture.py index 83ea8e2..b5f61e3 100644 --- a/tests/test_utils/common_fixture.py +++ b/tests/test_utils/common_fixture.py @@ -1,4 +1,6 @@ import os +import shutil +from subprocess import PIPE, STDOUT, Popen import pytest import torch @@ -8,6 +10,18 @@ from internlm.core.context.parallel_context import Config from internlm.solver.optimizer.hybrid_zero_optim import HybridZeroOptimizer from internlm.utils.common import SingletonMeta +OSS_NAME = os.environ["OSS_BUCKET_NAME"] +OSS_IP = os.environ["OSS_IP"] +USER = os.environ["USER"] +JOB_NAME = "CI_TEST" +LOCAL_SAVE_PATH = "local:local_ckpt" + +BOTO_SAVE_PATH = f"boto3:s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}" +BOTO_SAVE_PATH_NO_PRFIX = f"s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}/" + +ASYNC_TMP_FOLDER = "./async_tmp_folder" + + # 1B init_config = Config( dict( @@ -108,8 +122,10 @@ def reset_singletons(): def reset_seed(): from internlm.core.context.random import _SEED_MANAGER + _SEED_MANAGER.reset() + @pytest.fixture(scope="module") def init_dist_and_model(): from internlm.initialize import initialize_distributed_env @@ -136,8 +152,30 @@ def init_dist_and_model(): reset_seed() - def enter_flag(text): print(f"{text} begin!", flush=True) yield print(f"{text} end!", flush=True) + + +def del_tmp_file(): + try: + shutil.rmtree(ASYNC_TMP_FOLDER, ignore_errors=True) + except FileNotFoundError: + pass + + try: + shutil.rmtree(LOCAL_SAVE_PATH.split(":")[1], ignore_errors=True) + except FileNotFoundError: + pass + + try: + cmd = r"/mnt/petrelfs/share/sensesync --dryrun --deleteSrc cp " + BOTO_SAVE_PATH_NO_PRFIX + " / " + with Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) as output: + results, presults = "", "" + for line in iter(output.stdout.readline, b""): + results += str(line.rstrip()) + presults += line.rstrip().decode() + "\n" + print(presults, flush=True) + except FileNotFoundError: + pass diff --git a/tests/test_utils/test_model_checkpoint.py b/tests/test_utils/test_model_checkpoint.py index 0a93dba..bd93436 100644 --- a/tests/test_utils/test_model_checkpoint.py +++ b/tests/test_utils/test_model_checkpoint.py 
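An illustrative aside on the fixtures above: the `local:` and `boto3:` prefixes are what the storage manager keys its backend choice on, and the `save()` change earlier in this patch additionally forces synchronous upload for any path that is not a `boto3:` one. A minimal sketch of that routing, using a hypothetical helper name and assuming nothing beyond the prefix convention shown here:

```python
# Hypothetical helper, not part of this patch: sketches the "local:"/"boto3:" prefix
# routing that the fixtures above and StorageManager.save() rely on. Async upload is
# only meaningful for boto3 paths; anything else falls back to a synchronous save.
def resolve_backend(save_path: str, default_async: bool):
    backend, _, rest = save_path.partition(":")  # e.g. "local:local_ckpt" or "boto3:s3://..."
    async_upload = default_async and backend == "boto3"
    return backend, rest, async_upload


print(resolve_backend("local:local_ckpt", True))
# ('local', 'local_ckpt', False)
print(resolve_backend("boto3:s3://bucket.endpoint/user/CI_TEST", True))
# ('boto3', 's3://bucket.endpoint/user/CI_TEST', True)
```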
@@ -1,6 +1,4 @@ import os -import shutil -from subprocess import PIPE, STDOUT, Popen import pytest import torch @@ -13,6 +11,10 @@ from internlm.utils.common import SingletonMeta from internlm.utils.model_checkpoint import CheckpointManager from internlm.utils.storage_manager import wait_async_upload_finish from tests.test_utils.common_fixture import ( # noqa # pylint: disable=unused-import + ASYNC_TMP_FOLDER, + BOTO_SAVE_PATH, + LOCAL_SAVE_PATH, + del_tmp_file, init_dist_and_model, reset_singletons, ) @@ -21,39 +23,6 @@ TOTAL_STEP = 6 CKPT_EVERY = 4 SNPASHOT_EVERY = 2 -OSS_NAME = os.environ["OSS_BUCKET_NAME"] -OSS_IP = os.environ["OSS_IP"] -USER = os.environ["USER"] -JOB_NAME = "CI_TEST" -LOCAL_SAVE_PATH = "local:local_ckpt" - -BOTO_SAVE_PATH = f"boto3:s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}" -BOTO_SAVE_PATH_NO_PRFIX = f"s3://{OSS_NAME}.{OSS_IP}/{USER}/{JOB_NAME}/" - -ASYNC_TMP_FOLDER = "./async_tmp_folder" - - -def del_tmp_file(): - try: - shutil.rmtree(ASYNC_TMP_FOLDER, ignore_errors=True) - except FileNotFoundError: - pass - - try: - shutil.rmtree(LOCAL_SAVE_PATH.split(":")[1], ignore_errors=True) - except FileNotFoundError: - pass - - try: - cmd = r"/mnt/petrelfs/share/sensesync --dryrun --deleteSrc cp " + BOTO_SAVE_PATH_NO_PRFIX + " / " - with Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True) as output: - results, presults = "", "" - for line in iter(output.stdout.readline, b""): - results += str(line.rstrip()) - presults += line.rstrip().decode() + "\n" - print(presults, flush=True) - except FileNotFoundError: - pass ckpt_config_list = [ diff --git a/tests/test_utils/test_storage_manager.py b/tests/test_utils/test_storage_manager.py index eef4152..32f905b 100644 --- a/tests/test_utils/test_storage_manager.py +++ b/tests/test_utils/test_storage_manager.py @@ -1,21 +1,75 @@ +import os + import pytest +import torch from internlm.core.context.parallel_context import Config from internlm.initialize.launch import get_config_value from tests.test_utils.common_fixture import ( # noqa # pylint: disable=unused-import + ASYNC_TMP_FOLDER, BOTO_SAVE_PATH, - TOTAL_STEP, - ckpt_config_list, + LOCAL_SAVE_PATH, del_tmp_file, init_dist_and_model, reset_singletons, ) +ASYNC_TMP_FOLDER = "./async_tmp_folder" +ckpt_config_list = [ + # async boto + dict( + enable_save_ckpt=True, + async_upload_tmp_folder=ASYNC_TMP_FOLDER, + async_upload=True, + save_folder=BOTO_SAVE_PATH, + test_id=0, + ), + # sync local + dict( + enable_save_ckpt=True, + async_upload_tmp_folder=None, + async_upload=False, + save_folder=LOCAL_SAVE_PATH, + test_id=1, + ), + # sync boto + dict( + enable_save_ckpt=True, + async_upload_tmp_folder=None, + async_upload=False, + save_folder=BOTO_SAVE_PATH, + test_id=2, + ), + # async local + dict( + enable_save_ckpt=True, + async_upload_tmp_folder=ASYNC_TMP_FOLDER, + async_upload=True, + save_folder=LOCAL_SAVE_PATH, + test_id=3, + ), +] + +@pytest.fixture(scope="function") +def del_tmp(): + del_tmp_file() + yield + del_tmp_file() + + +@pytest.mark.usefixtures("del_tmp") @pytest.mark.usefixtures("reset_singletons") @pytest.mark.parametrize("ckpt_config", ckpt_config_list) -def test_storage_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-argument - from internlm.utils.storage_manager import get_storage_manager, init_storage_manager +def test_storage_mm_save_load(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-argument + from internlm.utils.storage_manager import ( + check_folder, + get_fns, + init_storage_manager, + llm_load, + llm_save, + 
wait_async_upload_finish, + ) ckpt_config = Config(ckpt_config) enable_save_ckpt = get_config_value(ckpt_config, "enable_save_ckpt", False) @@ -23,4 +77,13 @@ def test_storage_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable async_upload = get_config_value(ckpt_config, "async_upload", False) init_storage_manager(enable_save_ckpt, async_upload_tmp_folder, async_upload) - get_storage_manager() + + tobj = torch.rand(64, 64) + save_fn = os.path.join(ckpt_config.save_folder, "test.pt") + llm_save(save_fn, tobj) + if ckpt_config.test_id == 0: + wait_async_upload_finish() + check_folder(save_fn) + assert get_fns(ckpt_config.save_folder)[0] == "test.pt" + load_obj = llm_load(save_fn, map_location="cpu") + assert 0 == ((load_obj != tobj).sum()) From ff181bc5f852fe46327ed65a69d445abd432e24e Mon Sep 17 00:00:00 2001 From: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Date: Wed, 6 Sep 2023 04:05:04 +0800 Subject: [PATCH 23/34] fix(ckpt): fix checkpoint reload bug (#282) 1. fix only_load tuple convert bug. 2. fix reload_zero_fp32_buff copy bug --- internlm/solver/optimizer/hybrid_zero_optim.py | 2 +- internlm/utils/model_checkpoint.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 2fa1364..0e4343a 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -784,4 +784,4 @@ def reload_zero_fp32_buff(optimizer): optimizer._zero_local_rank, group_id ) # param_group["params"] is fp32 flatten optimizer states of this zero rank. - param_group["params"][0].copy_(fp16_flat_current_rank.float()) + param_group["params"][0].data.copy_(fp16_flat_current_rank.float()) diff --git a/internlm/utils/model_checkpoint.py b/internlm/utils/model_checkpoint.py index 87a1fb4..21d76d1 100644 --- a/internlm/utils/model_checkpoint.py +++ b/internlm/utils/model_checkpoint.py @@ -123,7 +123,7 @@ class CheckpointLoadMask: return content in self.load_set and len(self.load_set) > 1 def only_load(self, content: CheckpointLoadContent): - return set(content) == self.load_set + return set((content,)) == self.load_set def __str__(self) -> str: return f"{self.load_set}." From 7f687bf4b342f3e329ac71989527368881ea0d78 Mon Sep 17 00:00:00 2001 From: Wenwen Qu Date: Wed, 6 Sep 2023 14:34:11 +0800 Subject: [PATCH 24/34] fix(core/context): use dummy mode to generate random numbers in model construction (#266) * change mode to dummy in model construction and restore to data when done * add comments * move set_mode(.DATA) to initialize_model(.) --- internlm/core/context/parallel_context.py | 11 ++++++++--- internlm/core/context/process_group_initializer.py | 3 +++ internlm/train/training_internlm.py | 5 +++++ 3 files changed, 16 insertions(+), 3 deletions(-) diff --git a/internlm/core/context/parallel_context.py b/internlm/core/context/parallel_context.py index f1de5ad..730244d 100644 --- a/internlm/core/context/parallel_context.py +++ b/internlm/core/context/parallel_context.py @@ -36,7 +36,7 @@ class Config(dict): config (dict): The dict object to be wrapped. 
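A brief aside on the `only_load` fix above, assuming the `CheckpointLoadContent` values are plain strings: calling `set()` on a single string iterates its characters, which is exactly the tuple-conversion bug this commit fixes.

```python
# Illustrative only; assumes the load-content constant is a plain string such as "model".
content = "model"
load_set = {"model"}

print(set(content) == load_set)     # False: set("model") is {'m', 'o', 'd', 'e', 'l'}
print(set((content,)) == load_set)  # True:  set(("model",)) is {"model"}
```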
""" - def __init__(self, config: dict = None): + def __init__(self, config: dict = None): # pylint: disable=W0231 if config is not None: for k, v in config.items(): self._add_item(k, v) @@ -100,7 +100,7 @@ class Config(dict): module_name = filepath.stem source_file = SourceFileLoader(fullname=str(module_name), path=str(filepath)) - module = source_file.load_module() # pylint: disable=W4902,E1120 + module = source_file.load_module() # pylint: disable=W4902,E1120,W1505 # load into config config = Config() @@ -526,6 +526,7 @@ class ParallelContext(metaclass=SingletonMeta): if dpseed_with_tpoffset: dp_seed = seed + pipeline_offset * 1024 add_seed(ParallelMode.DATA, dp_seed) + add_seed(ParallelMode.DUMMY, dp_seed) # model parallel seeds are different across ranks if self.is_initialized(ParallelMode.TENSOR): @@ -533,7 +534,11 @@ class ParallelContext(metaclass=SingletonMeta): tp_seed = seed + tp_rank + pipeline_offset * 1024 add_seed(ParallelMode.TENSOR, tp_seed) - set_mode(ParallelMode.DATA) + # we do not set the random state mode to ParallelMode.DATA until model is built (instead, we use a dummy mode + # during model construction), this is because the random state will be different in different tensor parallel + # device of the same data parallel group. The underlying reason is that the device of tp_rank = 0 will perform + # additional random operations during the RowParallelLinear module building process. + set_mode(ParallelMode.DUMMY) seeds = get_seeds() seed_str = ", ".join([f"{k}: {v}" for k, v in seeds.items()]) diff --git a/internlm/core/context/process_group_initializer.py b/internlm/core/context/process_group_initializer.py index facb806..11b41c0 100644 --- a/internlm/core/context/process_group_initializer.py +++ b/internlm/core/context/process_group_initializer.py @@ -35,6 +35,9 @@ class ParallelMode(Enum): # runntime network test NETTEST = "nettest" + # dummy mode, only used during mode construction + DUMMY = "dummy" + class ProcessGroupInitializer(ABC): """An object, knowing the parallelism configuration, that initializes parallel groups. diff --git a/internlm/train/training_internlm.py b/internlm/train/training_internlm.py index a42758a..fec9239 100644 --- a/internlm/train/training_internlm.py +++ b/internlm/train/training_internlm.py @@ -12,6 +12,7 @@ from torch.utils.data import ConcatDataset, DataLoader from internlm.core.context import ParallelMode from internlm.core.context import global_context as gpc +from internlm.core.context.random import set_mode from internlm.core.naive_amp import NaiveAMPModel from internlm.core.trainer import TrainState from internlm.data.batch_sampler import StaticBatchSampler, get_dpsampler_dataloader @@ -80,6 +81,10 @@ def initialize_model(): # the same across tensor parallelism. sync_model_param_within_tp(model) + # Change random state mode to ParallelMode.DATA after model is built, guaranteeing the random + # state in the same dp group are all the same. 
+ set_mode(ParallelMode.DATA) + return model From b6d909d43e127c441fa0c2a336d4ca5d7f0e9093 Mon Sep 17 00:00:00 2001 From: Season Date: Wed, 6 Sep 2023 15:36:03 +0800 Subject: [PATCH 25/34] docs(*): add documentation and reST files for readthedocs (#272) * add initial reST files for readthedocs * fix typos * docs refine and minor fix * add references for parallel training section * fix reST format * fix reST format * fix reST format * add comments for trainer API * add link to step-by-step quickstart guide * docs(code-docs/source/parallel.rst): add paper link url * docs(code-docs/source/parallel.rst): add paper link url * use MyST to render markdown * docs(code-docs/source/initialize.rst): update model init * add requirements for myst-parser * reuse install and usage markdown * docs(code-docs/source/index.rst): add example and q&a * docs(doc/code-docs/*): docs refine * docs(code-docs/source/parallel.rst): update docs for zero config * docs(code-docs/source/example.rst): fix typos for example.rst * docs(code-docs/source/example.rst): refine docs * docs(code-docs/source/example): update example * docs(code-docs/source/example): delete useless example * docs(code-docs/source/*): fix image display issue * docs(code-docs/source/parallel.rst): add docs for communication overlap * docs(code-docs/source/conf.py): update conf.py * docs(code-docs/source/example): update example 30B demo * docs(code-docs/source/parallel.rst): update pipeline parallel * docs(code-docs/source/parallel.rst): update pipeline parallel * docs(code-docs/source/parallel.rst): update pipeline parallel * docs(code-docs/source/parallel.rst): update pipeline parallel * docs(code-docs/source/parallel.rst): update ZeRO1.5 * docs(code-docs/source/parallel.rst): update ZeRO1.5 * docs(code-docs/source): fix word spelling error --------- Co-authored-by: huangting4201 --- doc/code-docs/requirements.txt | 5 +- doc/code-docs/source/checkpoint.rst | 12 +- doc/code-docs/source/conf.py | 12 +- doc/code-docs/source/example/30B_demo.rst | 203 ++++++++++++++++++ doc/code-docs/source/example/7B_demo.rst | 193 +++++++++++++++++ doc/code-docs/source/example/index.rst | 18 ++ doc/code-docs/source/index.rst | 27 ++- doc/code-docs/source/initialize.rst | 57 ++++- doc/code-docs/source/install.md | 72 +------ doc/code-docs/source/monitor.rst | 18 +- doc/code-docs/source/parallel.rst | 141 +++++++++++- doc/code-docs/source/profiler.rst | 22 +- doc/code-docs/source/qa.rst | 2 + doc/code-docs/source/training.rst | 10 +- doc/code-docs/source/usage.md | 4 + doc/en/install.md | 2 +- doc/en/usage.md | 7 +- doc/imgs/pipeline_schedule.png | Bin 0 -> 257725 bytes doc/imgs/sequence_parallel.png | Bin 0 -> 173844 bytes doc/imgs/tensor_parallel.png | Bin 0 -> 131894 bytes .../core/scheduler/no_pipeline_scheduler.py | 14 +- internlm/core/scheduler/pipeline_scheduler.py | 3 +- internlm/core/trainer.py | 6 + internlm/initialize/initialize_trainer.py | 4 +- internlm/initialize/launch.py | 2 +- internlm/model/modeling_internlm.py | 3 +- internlm/monitor/monitor.py | 8 + internlm/train/training_internlm.py | 20 +- 28 files changed, 755 insertions(+), 110 deletions(-) create mode 100644 doc/code-docs/source/example/30B_demo.rst create mode 100644 doc/code-docs/source/example/7B_demo.rst create mode 100644 doc/code-docs/source/example/index.rst create mode 100644 doc/code-docs/source/qa.rst create mode 100644 doc/code-docs/source/usage.md create mode 100644 doc/imgs/pipeline_schedule.png create mode 100644 doc/imgs/sequence_parallel.png create mode 100644 
doc/imgs/tensor_parallel.png diff --git a/doc/code-docs/requirements.txt b/doc/code-docs/requirements.txt index 8cbfddf..604cb2c 100644 --- a/doc/code-docs/requirements.txt +++ b/doc/code-docs/requirements.txt @@ -1,10 +1,11 @@ Sphinx sphinx-autobuild -recommonmark sphinx_rtd_theme sphinx_markdown_tables autodoc_pydantic==1.9 enum_tools numpy torch -tqdm \ No newline at end of file +tqdm +pyecharts +myst-parser \ No newline at end of file diff --git a/doc/code-docs/source/checkpoint.rst b/doc/code-docs/source/checkpoint.rst index 3ceed08..08d4c8f 100644 --- a/doc/code-docs/source/checkpoint.rst +++ b/doc/code-docs/source/checkpoint.rst @@ -1,2 +1,12 @@ Model Checkpointing -=================== \ No newline at end of file +=================== + +InternLM uses ``internlm.utils.model_checkpoint.CheckpointManager`` to manage model checkpointing. In the implementation, +we use ``CheckpointManager.try_save_checkpoint(train_state)`` to checkpoint training states at specific steps. InternLM supports +automatic loading of latest ckpt at startup and automatic model checkpointing at signal quit. + +Checkpointing +------------- + +.. autoclass:: internlm.utils.model_checkpoint.CheckpointManager + :members: diff --git a/doc/code-docs/source/conf.py b/doc/code-docs/source/conf.py index 4bce035..856ffb6 100644 --- a/doc/code-docs/source/conf.py +++ b/doc/code-docs/source/conf.py @@ -12,19 +12,25 @@ import sys project = "InternLM" copyright = "2023, InternLM Team" author = "InternLM Team" -release = "v0.2.0" + +with open("../../../version.txt", "r") as f: + release = f.readline().rstrip() + +master_doc = 'index' + +autodoc_member_order = 'bysource' # -- General configuration --------------------------------------------------- # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration extensions = [ - "recommonmark", "sphinx_rtd_theme", "sphinx.ext.viewcode", "sphinx.ext.autodoc", "sphinxcontrib.autodoc_pydantic", "sphinx.ext.autosectionlabel", "sphinx.ext.napoleon", + "myst_parser", ] pygments_style = "sphinx" @@ -71,7 +77,7 @@ html_static_path = ["_static"] # GitHub integration html_context = { "display_github": True, - "github_user": "pjlab", + "github_user": "InternLM", "github_repo": "InternLM", "github_version": "master", "conf_py_path": "/doc/code-docs/source/", diff --git a/doc/code-docs/source/example/30B_demo.rst b/doc/code-docs/source/example/30B_demo.rst new file mode 100644 index 0000000..98e1915 --- /dev/null +++ b/doc/code-docs/source/example/30B_demo.rst @@ -0,0 +1,203 @@ +30B Demo +================ + +Training Config +---------------- + +30B demo config file example: + +.. code-block:: python + + JOB_NAME = "30b_train" + + SEQ_LEN = 2048 + HIDDEN_SIZE = 6144 + NUM_ATTENTION_HEAD = 48 + MLP_RATIO = 8 / 3 + NUM_LAYER = 60 + VOCAB_SIZE = 103168 + + MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx" + # Ckpt folder format: + # fs: 'local:/mnt/nfs/XXX' + SAVE_CKPT_FOLDER = "local:llm_ckpts" + LOAD_CKPT_FOLDER = "local:llm_ckpts/49" + + # boto3 Ckpt folder format: + # import os + # BOTO3_IP = os.environ["BOTO3_IP"] # boto3 bucket endpoint + # SAVE_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm" + # LOAD_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm/snapshot/1/" + CHECKPOINT_EVERY = 50 + ckpt = dict( + enable_save_ckpt=False, # enable ckpt save. + save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to save training ckpt. + # load_ckpt_folder=LOAD_CKPT_FOLDER, # Ckpt path to resume training(load weights and scheduler/context states). 
+ # load_model_only_folder=MODEL_ONLY_FOLDER, # Path to initialize with given model weights. + load_optimizer=True, # Wheter to load optimizer states when continuing training. + checkpoint_every=CHECKPOINT_EVERY, + async_upload=True, # async ckpt upload. (only work for boto3 ckpt) + async_upload_tmp_folder="/dev/shm/internlm_tmp_ckpt/", # path for temporarily files during asynchronous upload. + snapshot_ckpt_folder="/".join([SAVE_CKPT_FOLDER, "snapshot"]), # directory for snapshot ckpt storage path. + oss_snapshot_freq=int(CHECKPOINT_EVERY / 2), # snapshot ckpt save frequency. + ) + + TRAIN_FOLDER = "/path/to/dataset" + VALID_FOLDER = "/path/to/dataset" + data = dict( + seq_len=SEQ_LEN, + # micro_num means the number of micro_batch contained in one gradient update + micro_num=4, + # packed_length = micro_bsz * SEQ_LEN + micro_bsz=2, + # defaults to the value of micro_num + valid_micro_num=4, + # defaults to 0, means disable evaluate + valid_every=50, + pack_sample_into_one=False, + total_steps=50000, + skip_batches="", + rampup_batch_size="", + # Datasets with less than 50 rows will be discarded + min_length=50, + # train_folder=TRAIN_FOLDER, + # valid_folder=VALID_FOLDER, + ) + + grad_scaler = dict( + fp16=dict( + # the initial loss scale, defaults to 2**16 + initial_scale=2**16, + # the minimum loss scale, defaults to None + min_scale=1, + # the number of steps to increase loss scale when no overflow occurs + growth_interval=1000, + ), + # the multiplication factor for increasing loss scale, defaults to 2 + growth_factor=2, + # the multiplication factor for decreasing loss scale, defaults to 0.5 + backoff_factor=0.5, + # the maximum loss scale, defaults to None + max_scale=2**24, + # the number of overflows before decreasing loss scale, defaults to 2 + hysteresis=2, + ) + + hybrid_zero_optimizer = dict( + # Enable low_level_optimzer overlap_communication + overlap_sync_grad=True, + overlap_sync_param=True, + # bucket size for nccl communication params + reduce_bucket_size=512 * 1024 * 1024, + # grad clipping + clip_grad_norm=1.0, + ) + + loss = dict( + label_smoothing=0, + ) + + adam = dict( + lr=1e-4, + adam_beta1=0.9, + adam_beta2=0.95, + adam_beta2_c=0, + adam_eps=1e-8, + weight_decay=0.01, + ) + + lr_scheduler = dict( + total_steps=data["total_steps"], + init_steps=0, # optimizer_warmup_step + warmup_ratio=0.01, + eta_min=1e-5, + last_epoch=-1, + ) + + beta2_scheduler = dict( + init_beta2=adam["adam_beta2"], + c=adam["adam_beta2_c"], + cur_iter=-1, + ) + + model = dict( + checkpoint=False, # The proportion of layers for activation aheckpointing, the optional value are True/False/[0-1] + num_attention_heads=NUM_ATTENTION_HEAD, + embed_split_hidden=True, + vocab_size=VOCAB_SIZE, + embed_grad_scale=1, + parallel_output=True, + hidden_size=HIDDEN_SIZE, + num_layers=NUM_LAYER, + mlp_ratio=MLP_RATIO, + apply_post_layer_norm=False, + dtype="torch.float16", # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32" + norm_type="rmsnorm", + layer_norm_epsilon=1e-5, + use_flash_attn=True, + num_chunks=1, # if num_chunks > 1, interleaved pipeline scheduler is used. + ) + """ + zero1 parallel: + 1. if zero1 <= 0, The size of the zero process group is equal to the size of the dp process group, + so parameters will be divided within the range of dp. + 2. if zero1 == 1, zero is not used, and all dp groups retain the full amount of model parameters. + 3. zero1 > 1 and zero1 <= dp world size, the world size of zero is a subset of dp world size. 
+ For smaller models, it is usually a better choice to split the parameters within nodes with a setting <= 8. + pipeline parallel (dict): + 1. size: int, the size of pipeline parallel. + 2. interleaved_overlap: bool, enable/disable communication overlap when using interleaved pipeline scheduler. + tensor parallel: tensor parallel size, usually the number of GPUs per node. + """ + parallel = dict( + zero1=-1, + tensor=4, + pipeline=dict(size=1, interleaved_overlap=True), + sequence_parallel=False, + ) + + cudnn_deterministic = False + cudnn_benchmark = False + + +Start Training +---------------- + +After completing the data preparation and relevant training configurations, you can start the demo training. +The following example shows how to start distributed training in ``slurm`` environments with 16 GPUs. + +.. code-block:: bash + + srun -p internllm -N 2 -n 16 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./configs/30B_sft.py + +Training Results +---------------- + +Taking the configuration of the demo training on two nodes with 16 GPUs on slurm as an example, the training result log is shown below: + +.. code-block:: bash + + 2023-09-06 10:29:26,629 INFO parallel_context.py:508 in set_device -- process rank 10 is bound to host:HOST-10-140-66-20 device: 2 + 2023-09-06 10:29:26,632 INFO parallel_context.py:508 in set_device -- process rank 11 is bound to host:HOST-10-140-66-20 device: 3 + 2023-09-06 10:29:26,634 INFO parallel_context.py:508 in set_device -- process rank 12 is bound to host:HOST-10-140-66-20 device: 4 + 2023-09-06 10:29:26,636 INFO parallel_context.py:508 in set_device -- process rank 9 is bound to host:HOST-10-140-66-20 device: 1 + 2023-09-06 10:29:26,640 INFO parallel_context.py:508 in set_device -- process rank 15 is bound to host:HOST-10-140-66-20 device: 7 + 2023-09-06 10:29:26,639 INFO parallel_context.py:508 in set_device -- process rank 0 is bound to host:HOST-10-140-66-9 device: 0 + 2023-09-06 10:29:26,641 INFO parallel_context.py:508 in set_device -- process rank 2 is bound to host:HOST-10-140-66-9 device: 2 + 2023-09-06 10:29:26,643 INFO parallel_context.py:508 in set_device -- process rank 5 is bound to host:HOST-10-140-66-9 device: 5 + 2023-09-06 10:29:26,645 INFO parallel_context.py:508 in set_device -- process rank 6 is bound to host:HOST-10-140-66-9 device: 6 + 2023-09-06 10:29:26,661 INFO parallel_context.py:508 in set_device -- process rank 13 is bound to host:HOST-10-140-66-20 device: 5 + 2023-09-06 10:29:26,707 INFO parallel_context.py:508 in set_device -- process rank 1 is bound to host:HOST-10-140-66-9 device: 1 + 2023-09-06 10:29:26,826 INFO parallel_context.py:508 in set_device -- process rank 4 is bound to host:HOST-10-140-66-9 device: 4 + 2023-09-06 10:29:26,871 INFO parallel_context.py:508 in set_device -- process rank 7 is bound to host:HOST-10-140-66-9 device: 7 + 2023-09-06 10:29:26,932 INFO parallel_context.py:508 in set_device -- process rank 3 is bound to host:HOST-10-140-66-9 device: 3 + 2023-09-06 10:29:27,156 INFO parallel_context.py:508 in set_device -- process rank 14 is bound to host:HOST-10-140-66-20 device: 6 + 2023-09-06 10:29:27,271 INFO parallel_context.py:508 in set_device -- process rank 8 is bound to host:HOST-10-140-66-20 device: 0 + 2023-09-06 10:29:32,060 INFO launch.py:329 in launch -- Distributed environment is initialized, data parallel size: 4, pipeline parallel size: 1, tensor parallel size: 4 + 2023-09-06 10:30:06,141 INFO hybrid_zero_optim.py:291 in _partition_param_list -- Number of elements on 
ranks: [1782007296, 1812307968, 1812307968, 1706469888], rank:0 + 2023-09-06T10:30:38.216+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=40.00268401421643 step=0 loss=11.548227310180664 tgs (tokens/gpu/second)=227.37 lr=9.779754323328192e-05 loss_scale=65536.0 grad_norm={'0_default': 61.5836932112004} micro_num=4 num_consumed_tokens=65536 inf_nan_skip_batches=0 num_samples_in_batch=18 largest_length=2048 largest_batch=6 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=12.51 acc=0.0 perplexity=104121.5547 acc/en=0.0 acc/cn=0.0 acc/code=0.0 tokens/en=60571 tokens/cn=0 tokens/code=0 loss_from_metric=11.5533 loss/en=11.5533 loss/cn=nan loss/code=nan + 2023-09-06T10:30:46.343+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=89.00005814543725 step=1 loss=6.05580997467041 tgs (tokens/gpu/second)=505.86 lr=9.140576474687264e-05 loss_scale=65536.0 grad_norm={'0_default': 27.397946290506887} micro_num=4 num_consumed_tokens=131072 inf_nan_skip_batches=0 num_samples_in_batch=19 largest_length=2048 largest_batch=6 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=7.91 acc=0.0885 perplexity=405.4076 acc/en=0.0885 acc/cn=0.0 acc/code=0.0 tokens/en=60265 tokens/cn=0 tokens/code=0 loss_from_metric=6.0049 loss/en=6.0049 loss/cn=nan loss/code=nan + 2023-09-06T10:30:51.443+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=142.5138940898651 step=2 loss=5.054169654846191 tgs (tokens/gpu/second)=810.03 lr=8.14503363531613e-05 loss_scale=65536.0 grad_norm={'0_default': 10.438111430093606} micro_num=4 num_consumed_tokens=196608 inf_nan_skip_batches=0 num_samples_in_batch=17 largest_length=2048 largest_batch=5 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=4.87 acc=0.0715 perplexity=184.2986 acc/en=0.0715 acc/cn=0.0 acc/code=0.0 tokens/en=60244 tokens/cn=0 tokens/code=0 loss_from_metric=5.2166 loss/en=5.2166 loss/cn=nan loss/code=nan + 2023-09-06T10:30:56.509+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=143.56131674769466 step=3 loss=4.662276268005371 tgs (tokens/gpu/second)=815.98 lr=6.890576474687264e-05 loss_scale=65536.0 grad_norm={'0_default': 9.15959986316653} micro_num=4 num_consumed_tokens=262144 inf_nan_skip_batches=0 num_samples_in_batch=17 largest_length=2048 largest_batch=5 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=4.83 acc=0.0775 perplexity=102.6568 acc/en=0.0775 acc/cn=0.0 acc/code=0.0 tokens/en=60328 tokens/cn=0 tokens/code=0 loss_from_metric=4.6314 loss/en=4.6314 loss/cn=nan loss/code=nan + 2023-09-06T10:31:01.552+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=143.85087291011183 step=4 loss=4.020431041717529 tgs (tokens/gpu/second)=817.63 lr=5.500000000000001e-05 loss_scale=65536.0 grad_norm={'0_default': 6.873464794412589} micro_num=4 num_consumed_tokens=327680 inf_nan_skip_batches=0 num_samples_in_batch=22 largest_length=1893 largest_batch=8 smallest_batch=4 adam_beta2=0.95 fwd_bwd_time=4.82 acc=0.0701 perplexity=69.1167 acc/en=0.0701 acc/cn=0.0 acc/code=0.0 tokens/en=61028 tokens/cn=0 tokens/code=0 loss_from_metric=4.2358 loss/en=4.2358 loss/cn=nan loss/code=nan + 2023-09-06T10:31:06.830+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=15224 : tflops=142.8966468353613 step=5 loss=3.733311891555786 tgs (tokens/gpu/second)=812.2 lr=4.109423525312737e-05 
loss_scale=65536.0 grad_norm={'0_default': 5.811005102730085} micro_num=4 num_consumed_tokens=393216 inf_nan_skip_batches=0 num_samples_in_batch=13 largest_length=2048 largest_batch=4 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=4.85 acc=0.0688 perplexity=46.298 acc/en=0.0688 acc/cn=0.0 acc/code=0.0 tokens/en=61004 tokens/cn=0 tokens/code=0 loss_from_metric=3.8351 loss/en=3.8351 loss/cn=nan loss/code=nan \ No newline at end of file diff --git a/doc/code-docs/source/example/7B_demo.rst b/doc/code-docs/source/example/7B_demo.rst new file mode 100644 index 0000000..1e264c4 --- /dev/null +++ b/doc/code-docs/source/example/7B_demo.rst @@ -0,0 +1,193 @@ +7B Demo +================ + +Training Config +---------------- + +7B demo config file example: + +.. code-block:: python + + JOB_NAME = "7b_train" + + SEQ_LEN = 2048 + HIDDEN_SIZE = 4096 + NUM_ATTENTION_HEAD = 32 + MLP_RATIO = 8 / 3 + NUM_LAYER = 32 + VOCAB_SIZE = 103168 + + MODEL_ONLY_FOLDER = "local:llm_ckpts/xxxx" + # Ckpt folder format: + # fs: 'local:/mnt/nfs/XXX' + SAVE_CKPT_FOLDER = "local:llm_ckpts" + LOAD_CKPT_FOLDER = "local:llm_ckpts/49" + + # boto3 Ckpt folder format: + # import os + # BOTO3_IP = os.environ["BOTO3_IP"] # boto3 bucket endpoint + # SAVE_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm" + # LOAD_CKPT_FOLDER = f"boto3:s3://model_weights.{BOTO3_IP}/internlm/snapshot/1/" + CHECKPOINT_EVERY = 50 + ckpt = dict( + enable_save_ckpt=False, # enable ckpt save. + save_ckpt_folder=SAVE_CKPT_FOLDER, # Path to save training ckpt. + # load_ckpt_folder=LOAD_CKPT_FOLDER, # Ckpt path to resume training(load weights and scheduler/context states). + # load_model_only_folder=MODEL_ONLY_FOLDER, # Path to initialize with given model weights. + load_optimizer=True, # Wheter to load optimizer states when continuing training. + checkpoint_every=CHECKPOINT_EVERY, + async_upload=True, # async ckpt upload. (only work for boto3 ckpt) + async_upload_tmp_folder="/dev/shm/internlm_tmp_ckpt/", # path for temporarily files during asynchronous upload. + snapshot_ckpt_folder="/".join([SAVE_CKPT_FOLDER, "snapshot"]), # directory for snapshot ckpt storage path. + oss_snapshot_freq=int(CHECKPOINT_EVERY / 2), # snapshot ckpt save frequency. 
+ ) + + TRAIN_FOLDER = "/path/to/dataset" + VALID_FOLDER = "/path/to/dataset" + data = dict( + seq_len=SEQ_LEN, + # micro_num means the number of micro_batch contained in one gradient update + micro_num=4, + # packed_length = micro_bsz * SEQ_LEN + micro_bsz=2, + # defaults to the value of micro_num + valid_micro_num=4, + # defaults to 0, means disable evaluate + valid_every=50, + pack_sample_into_one=False, + total_steps=50000, + skip_batches="", + rampup_batch_size="", + # Datasets with less than 50 rows will be discarded + min_length=50, + # train_folder=TRAIN_FOLDER, + # valid_folder=VALID_FOLDER, + ) + + grad_scaler = dict( + fp16=dict( + # the initial loss scale, defaults to 2**16 + initial_scale=2**16, + # the minimum loss scale, defaults to None + min_scale=1, + # the number of steps to increase loss scale when no overflow occurs + growth_interval=1000, + ), + # the multiplication factor for increasing loss scale, defaults to 2 + growth_factor=2, + # the multiplication factor for decreasing loss scale, defaults to 0.5 + backoff_factor=0.5, + # the maximum loss scale, defaults to None + max_scale=2**24, + # the number of overflows before decreasing loss scale, defaults to 2 + hysteresis=2, + ) + + hybrid_zero_optimizer = dict( + # Enable low_level_optimzer overlap_communication + overlap_sync_grad=True, + overlap_sync_param=True, + # bucket size for nccl communication params + reduce_bucket_size=512 * 1024 * 1024, + # grad clipping + clip_grad_norm=1.0, + ) + + loss = dict( + label_smoothing=0, + ) + + adam = dict( + lr=1e-4, + adam_beta1=0.9, + adam_beta2=0.95, + adam_beta2_c=0, + adam_eps=1e-8, + weight_decay=0.01, + ) + + lr_scheduler = dict( + total_steps=data["total_steps"], + init_steps=0, # optimizer_warmup_step + warmup_ratio=0.01, + eta_min=1e-5, + last_epoch=-1, + ) + + beta2_scheduler = dict( + init_beta2=adam["adam_beta2"], + c=adam["adam_beta2_c"], + cur_iter=-1, + ) + + model = dict( + checkpoint=False, # The proportion of layers for activation aheckpointing, the optional value are True/False/[0-1] + num_attention_heads=NUM_ATTENTION_HEAD, + embed_split_hidden=True, + vocab_size=VOCAB_SIZE, + embed_grad_scale=1, + parallel_output=True, + hidden_size=HIDDEN_SIZE, + num_layers=NUM_LAYER, + mlp_ratio=MLP_RATIO, + apply_post_layer_norm=False, + dtype="torch.float16", # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32" + norm_type="rmsnorm", + layer_norm_epsilon=1e-5, + use_flash_attn=True, + num_chunks=1, # if num_chunks > 1, interleaved pipeline scheduler is used. + ) + """ + zero1 parallel: + 1. if zero1 <= 0, The size of the zero process group is equal to the size of the dp process group, + so parameters will be divided within the range of dp. + 2. if zero1 == 1, zero is not used, and all dp groups retain the full amount of model parameters. + 3. zero1 > 1 and zero1 <= dp world size, the world size of zero is a subset of dp world size. + For smaller models, it is usually a better choice to split the parameters within nodes with a setting <= 8. + pipeline parallel (dict): + 1. size: int, the size of pipeline parallel. + 2. interleaved_overlap: bool, enable/disable communication overlap when using interleaved pipeline scheduler. + tensor parallel: tensor parallel size, usually the number of GPUs per node. 
+ """ + parallel = dict( + zero1=8, + pipeline=dict(size=1, interleaved_overlap=True), + sequence_parallel=False, + ) + + cudnn_deterministic = False + cudnn_benchmark = False + +Start Training +---------------- + +After completing the data preparation and relevant training configurations, you can start the demo training. +The following example shows how to start distributed training in ``slurm`` environments with 8 GPUs. + +.. code-block:: bash + + srun -p internllm -N 1 -n 8 --ntasks-per-node=8 --gpus-per-task=1 python train.py --config ./configs/7B_sft.py + +Training Results +---------------- + +Taking the configuration of the demo training on a single machine with 8 GPUs on slurm as an example, the training result log is shown below: + +.. code-block:: bash + + 2023-09-05 11:47:44,649 INFO parallel_context.py:508 in set_device -- process rank 4 is bound to host:SH-IDC1-10-140-1-110 device: 4 + 2023-09-05 11:47:44,650 INFO parallel_context.py:508 in set_device -- process rank 3 is bound to host:SH-IDC1-10-140-1-110 device: 3 + 2023-09-05 11:47:44,651 INFO parallel_context.py:508 in set_device -- process rank 6 is bound to host:SH-IDC1-10-140-1-110 device: 6 + 2023-09-05 11:47:44,652 INFO parallel_context.py:508 in set_device -- process rank 7 is bound to host:SH-IDC1-10-140-1-110 device: 7 + 2023-09-05 11:47:44,652 INFO parallel_context.py:508 in set_device -- process rank 5 is bound to host:SH-IDC1-10-140-1-110 device: 5 + 2023-09-05 11:47:44,652 INFO parallel_context.py:508 in set_device -- process rank 1 is bound to host:SH-IDC1-10-140-1-110 device: 1 + 2023-09-05 11:47:44,652 INFO parallel_context.py:508 in set_device -- process rank 2 is bound to host:SH-IDC1-10-140-1-110 device: 2 + 2023-09-05 11:47:44,652 INFO parallel_context.py:508 in set_device -- process rank 0 is bound to host:SH-IDC1-10-140-1-110 device: 0 + 2023-09-05 11:47:51,006 INFO launch.py:354 in launch -- Distributed environment is initialized, data parallel size: 8, pipeline parallel size: 1, tensor parallel size: 1 + 2023-09-05 11:49:09,855 INFO hybrid_zero_optim.py:294 in _partition_param_list -- Number of elements on ranks: [894509056, 944865280, 966909952, 966909952, 966909952, 944865280, 966909952, 670068736], rank:0 + 2023-09-05T11:49:58.225+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=63.283263603947816 step=0 loss=11.641494750976562 tgs (tokens/gpu/second)=1424.93 lr=4.0000000000000003e-07 loss_scale=65536.0 grad_norm={'0_default': 66.51907327507652} micro_num=4 num_consumed_tokens=131072 inf_nan_skip_batches=0 num_samples_in_batch=19 largest_length=2048 largest_batch=6 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=6.87 acc=0.0 perplexity=112181.7188 acc/en=0.0 acc/cn=0.0 acc/code=0.0 tokens/en=120836 tokens/cn=0 tokens/code=0 loss_from_metric=11.6279 loss/en=11.6279 loss/cn=nan loss/code=nan + 2023-09-05T11:50:02.553+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=171.92140761933035 step=1 loss=11.546792984008789 tgs (tokens/gpu/second)=3871.11 lr=6.000000000000001e-07 loss_scale=65536.0 grad_norm={'0_default': 64.47430144542088} micro_num=4 num_consumed_tokens=262144 inf_nan_skip_batches=0 num_samples_in_batch=16 largest_length=2048 largest_batch=5 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=4.14 acc=0.0 perplexity=103779.1406 acc/en=0.0 acc/cn=0.0 acc/code=0.0 tokens/en=120572 tokens/cn=0 tokens/code=0 loss_from_metric=11.55 loss/en=11.55 loss/cn=nan loss/code=nan + 
2023-09-05T11:50:06.504+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=186.0565203348341 step=2 loss=11.106071472167969 tgs (tokens/gpu/second)=4189.39 lr=8.000000000000001e-07 loss_scale=65536.0 grad_norm={'0_default': 62.520055376005146} micro_num=4 num_consumed_tokens=393216 inf_nan_skip_batches=0 num_samples_in_batch=16 largest_length=2048 largest_batch=6 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=3.82 acc=0.0001 perplexity=71139.6797 acc/en=0.0001 acc/cn=0.0 acc/code=0.0 tokens/en=122032 tokens/cn=0 tokens/code=0 loss_from_metric=11.1724 loss/en=11.1724 loss/cn=nan loss/code=nan + 2023-09-05T11:50:10.487+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=185.48897918112567 step=3 loss=10.444510459899902 tgs (tokens/gpu/second)=4176.61 lr=1.0000000000000002e-06 loss_scale=65536.0 grad_norm={'0_default': 57.91057980979166} micro_num=4 num_consumed_tokens=524288 inf_nan_skip_batches=0 num_samples_in_batch=18 largest_length=2048 largest_batch=6 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=3.83 acc=0.0705 perplexity=39851.1289 acc/en=0.0705 acc/cn=0.0 acc/code=0.0 tokens/en=121125 tokens/cn=0 tokens/code=0 loss_from_metric=10.5929 loss/en=10.5929 loss/cn=nan loss/code=nan + 2023-09-05T11:50:14.476+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=185.8751803758398 step=4 loss=9.798665046691895 tgs (tokens/gpu/second)=4185.31 lr=1.2000000000000002e-06 loss_scale=65536.0 grad_norm={'0_default': 48.1136933755285} micro_num=4 num_consumed_tokens=655360 inf_nan_skip_batches=0 num_samples_in_batch=14 largest_length=2048 largest_batch=4 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=3.82 acc=0.076 perplexity=18045.6699 acc/en=0.076 acc/cn=0.0 acc/code=0.0 tokens/en=121365 tokens/cn=0 tokens/code=0 loss_from_metric=9.8007 loss/en=9.8007 loss/cn=nan loss/code=nan + 2023-09-05T11:50:18.442+08:00 INFO [training_internlm.py, line 413, in record_current_batch_training_metrics] - pid=6794 : tflops=185.6236609556878 step=5 loss=9.215429306030273 tgs (tokens/gpu/second)=4179.64 lr=1.4000000000000001e-06 loss_scale=65536.0 grad_norm={'0_default': 36.95489557069029} micro_num=4 num_consumed_tokens=786432 inf_nan_skip_batches=0 num_samples_in_batch=14 largest_length=2048 largest_batch=4 smallest_batch=3 adam_beta2=0.95 fwd_bwd_time=3.82 acc=0.0767 perplexity=8999.0869 acc/en=0.0767 acc/cn=0.0 acc/code=0.0 tokens/en=121223 tokens/cn=0 tokens/code=0 loss_from_metric=9.1049 loss/en=9.1049 loss/cn=nan loss/code=nan diff --git a/doc/code-docs/source/example/index.rst b/doc/code-docs/source/example/index.rst new file mode 100644 index 0000000..5844b0b --- /dev/null +++ b/doc/code-docs/source/example/index.rst @@ -0,0 +1,18 @@ +Training Example +================ + +7B Demo +------------ + +.. toctree:: + :maxdepth: 2 + + 7B_demo + +30B Demo +------------ + +.. toctree:: + :maxdepth: 2 + + 30B_demo diff --git a/doc/code-docs/source/index.rst b/doc/code-docs/source/index.rst index 3011df6..109dfa5 100644 --- a/doc/code-docs/source/index.rst +++ b/doc/code-docs/source/index.rst @@ -3,6 +3,7 @@ You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. + InternLM ======== @@ -14,6 +15,14 @@ Environment Setup install +Quickstart Guide +------------------- + +.. 
toctree:: + :maxdepth: 2 + + usage + Model Setup ------------------- @@ -39,7 +48,7 @@ Parallel Training parallel Model Checkpointing -------------------- +-------------------- .. toctree:: :maxdepth: 2 @@ -62,6 +71,22 @@ Monitor monitor +Example +------------------- + +.. toctree:: + :maxdepth: 2 + + example/index + +Q&A +------------------- + +.. toctree:: + :maxdepth: 2 + + qa + Indices and tables ================== diff --git a/doc/code-docs/source/initialize.rst b/doc/code-docs/source/initialize.rst index a638c33..257cffe 100644 --- a/doc/code-docs/source/initialize.rst +++ b/doc/code-docs/source/initialize.rst @@ -20,16 +20,71 @@ parser with some builtin arguments, users can add custom parameters to this pars .. autofunction:: internlm.initialize.get_default_parser -.. _InternLM-init: +.. _InternLM-model-init: Model Initialization ------------------------- +.. autofunction:: internlm.train.initialize_model + +InternLM uses the field ``model_type`` and ``model`` in the config file to control model initialization process. An example model initialization configuration +can be defined as follows: + +.. code-block:: python + + model_type = "INTERNLM" # default is "INTERNLM", used to register classes and modules for model initialization + NUM_ATTENTION_HEAD = 32 + VOCAB_SIZE = 103168 + HIDDEN_SIZE = 4096 + NUM_LAYER = 32 + MLP_RATIO = 8 / 3 + model = dict( + checkpoint=False, # The proportion of layers for activation aheckpointing, the optional value are True/False/[0-1] + num_attention_heads=NUM_ATTENTION_HEAD, + embed_split_hidden=True, + vocab_size=VOCAB_SIZE, + embed_grad_scale=1, + parallel_output=True, + hidden_size=HIDDEN_SIZE, + num_layers=NUM_LAYER, + mlp_ratio=MLP_RATIO, + apply_post_layer_norm=False, + dtype="torch.bfloat16", # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32" + norm_type="rmsnorm", + layer_norm_epsilon=1e-5, + use_flash_attn=True, + num_chunks=1, # if num_chunks > 1, interleaved pipeline scheduler is used. + ) + +- The field ``model_type`` specifics the model type has been registered and to be initialized. +- The parameters in field ``model`` specific the configuration settings during model initialization. + +It is worth noting that, users can define new model type, and register model's initialization function by decorater ``@MODEL_INITIALIZER.register_module``, which ``MODEL_INITIALIZER`` is an instantiated object of class ``internlm.util.registry.Registry``, the example is shown as follows. + +.. code-block:: python + + MODEL_TYPE = "NEW_MODEL" + + @MODEL_INITIALIZER.register_module(module_name=MODEL_TYPE) + def build_new_model_with_cfg(*args, **kwargs): + +.. _InternLM-optim-init: + Optimizer Initialization ------------------------- +.. autofunction:: internlm.train.initialize_optimizer + +.. _InternLM-dl-init: + Dataloader Initialization ------------------------- +.. autofunction:: internlm.train.get_train_data_loader + +.. _InternLM-trainer-init: + Trainer Initialization ------------------------- + +.. 
autofunction:: internlm.initialize.initialize_trainer \ No newline at end of file diff --git a/doc/code-docs/source/install.md b/doc/code-docs/source/install.md index 26f57c0..befb018 100644 --- a/doc/code-docs/source/install.md +++ b/doc/code-docs/source/install.md @@ -1,70 +1,2 @@ -## Installation - -### Environment Preparation -The required packages and corresponding version are shown as follows: -- Python == 3.10 -- GCC == 10.2.0 -- MPFR == 4.1.0 -- CUDA >= 11.7 -- Pytorch >= 1.13.1 -- Transformers >= 4.28.0 -- Flash-Attention >= v1.0.5 -- Apex == 23.05 -- GPU with Ampere or Hopper architecture (such as H100, A100) -- Linux OS - -After installing the above dependencies, some system environment variables need to be updated: -```bash -export CUDA_PATH={path_of_cuda_11.7} -export GCC_HOME={path_of_gcc_10.2.0} -export MPFR_HOME={path_of_mpfr_4.1.0} -export LD_LIBRARY_PATH=${GCC_HOME}/lib64:${MPFR_HOME}/lib:${CUDA_PATH}/lib64:$LD_LIBRARY_PATH -export PATH=${GCC_HOME}/bin:${CUDA_PATH}/bin:$PATH -export CC=${GCC_HOME}/bin/gcc -export CXX=${GCC_HOME}/bin/c++ -``` - -### Environment Installation -Clone the project `internlm` and its dependent submodules from the github repository, as follows: -```bash -git clone git@github.com:InternLM/InternLM.git --recurse-submodules -``` - -It is recommended to build a Python-3.10 virtual environment using conda and install the required dependencies based on the `requirements/` files: -```bash -conda create --name internlm-env python=3.10 -y -conda activate internlm-env -cd internlm -pip install -r requirements/torch.txt -pip install -r requirements/runtime.txt -``` - -Install flash-attention (version v1.0.5): -```bash -cd ./third_party/flash-attention -python setup.py install -cd ./csrc -cd fused_dense_lib && pip install -v . -cd ../xentropy && pip install -v . -cd ../rotary && pip install -v . -cd ../layer_norm && pip install -v . -cd ../../../../ -``` - -Install Apex (version 23.05): -```bash -cd ./third_party/apex -pip install -v --disable-pip-version-check --no-cache-dir --global-option="--cpp_ext" --global-option="--cuda_ext" ./ -cd ../../ -``` - -### Environment Image -Users can obtain an image with the InternLM runtime environment installed from https://hub.docker.com/r/sunpengsdu/internlm. The commands for pulling the image and starting the container are as follows: - -```bash -# pull image -docker pull sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -# start container -docker run --gpus all -d -it --shm-size=2gb --name myinternlm sunpengsdu/internlm:torch1.13-cuda11.7-flashatten1.0.5-centos -docker exec -it myinternlm bash -``` +```{include} ../../en/install.md +``` \ No newline at end of file diff --git a/doc/code-docs/source/monitor.rst b/doc/code-docs/source/monitor.rst index ff8cd1b..57d0798 100644 --- a/doc/code-docs/source/monitor.rst +++ b/doc/code-docs/source/monitor.rst @@ -1,10 +1,26 @@ Monitor and Alert ================= - Monitoring ----------------- +InternLM uses ``internlm.monitor.monitor.initialize_monitor_manager()`` to initialize context monitor. During this time, +a singleton ``internlm.monitor.monitor.MonitorManager`` will manage monitoring thread and track training status +with ``internlm.monitor.monitor.MonitorTracker``. + +.. autofunction:: internlm.monitor.monitor.initialize_monitor_manager + +.. autoclass:: internlm.monitor.monitor.MonitorManager + :members: + +.. 
autoclass:: internlm.monitor.monitor.MonitorTracker + :members: Alerting ----------------- + +InternLM monitor thread periodically tracks loss spike, potential stuck condition, runtime exception, and SIGTERM signal. +When above situation occurs, an alert will be triggered and a message will be sent to the Feishu webhook address by calling +``internlm.monitor.alert.send_feishu_msg_with_webhook()`` + +.. autofunction:: internlm.monitor.alert.send_feishu_msg_with_webhook diff --git a/doc/code-docs/source/parallel.rst b/doc/code-docs/source/parallel.rst index 3515847..dcdebad 100644 --- a/doc/code-docs/source/parallel.rst +++ b/doc/code-docs/source/parallel.rst @@ -1,23 +1,158 @@ Parallel Training -================= +================== -.. 整体说一下并行配置使用方式,接下来再分模块详细说明 +.. Brief introduction to training parallelism, and how-to guide about config setting + +InternLM supports tensor parallel, pipeline parallel, sequence parallel, data parallel, and ZeRO1.5 to parallelize the training pipeline. +When initializing the distributed environment, we need to specify tensor parallel size, pipeline parallel size, data parallel size, +and ZeRO1.5 strategy. + +The parallel setting of InternLM is fully config-driven, and you can change the parallelism by modifying +`config file `_. An exmaple parallel training configuration can be defined as follows: + +.. code-block:: python + + parallel = dict( + zero1=8, + tensor=1, + pipeline=dict(size=1, interleaved_overlap=True), + sequence_parallel=False, + ) + +- zero1: zero parallel strategy, divided into the following three cases, the default value is -1 + + - When ``size <= 0``, the size of the zero1 process group is equal to the size of the data parallel process group, so the optimizer state parameters will be split within the data parallel range. + - When ``size == 1``, zero1 is not used, and all data parallel groups retain the complete optimizer state parameters. + - When ``size > 1`` and ``size <= data_parallel_world_size``, the zero1 process group is a subset of the data parallel process group. + +- tensor: tensor parallel size, usually the number of GPUs per node, the default value is 1 +- pipeline: pipeline parallel strategy + + - size: pipeline parallel size, the default value is 1 + - interleaved_overlap: bool type, when interleaved scheduling, enable or disable communication optimization, the default value is False + +- sequence_parallel: whether to enable sequence parallelism, the default value is False + +Note: `Data parallel size = Total number of GPUs / Pipeline parallel size / Tensor parallel size` Tensor Parallel ----------------- +The implementation of tensor parallel for InternLM is based on `flash attention `_, which has tensor +parallel extensions to parallelize `attention `_ and +`linear `_ blocks in InternLM model. + +To use tensor parallel, you need to set the value of tensor parallel size ``parallel.tensor`` in the config file, which is usually the number of GPUs per node. + +.. figure:: ../../imgs/tensor_parallel.png + :scale: 50% + :class: with-border + + Tensor parallel, adopted from `flash-attention `_ Pipeline Parallel ----------------- +InternLM uses `1F1B `_ (one forward pass followed by one backward pass) for pipeline parallel. For 1F1B strategy, there are two implementations: +(1) non-interleaved scheduler, which is memory-efficient (2) interleaved scheduler, which is both memory-efficient and time-efficient. + +.. 
figure:: ../../imgs/pipeline_schedule.png + :scale: 45% + :class: with-border + + Non-interleaved and interleaved scheduler for 1F1B pipeline parallelism, adopted from `Megatron-LM `_ + +scheduler for non-interleaved 1F1B strategy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To use non-interleaved pipeline scheduler, you need to set ``model.num_chunks = 1`` in the config file. + +.. autoclass:: internlm.core.scheduler.pipeline_scheduler.PipelineScheduler + :members: + +scheduler for interleaved 1F1B strategy +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +To use interleaved pipeline scheduler, you need to set ``model.num_chunks > 1`` in the config file. + +.. autoclass:: internlm.core.scheduler.pipeline_scheduler.InterleavedPipelineScheduler + :members: + +Also, to enable communication overlap when using interleaved pipeline scheduler, you need to set ``parallel.pipeline.interleaved_overlap = True`` +in the config file. + +When ``parallel.pipeline.interleaved_overlap = True``, function ``InterleavedPipelineScheduler._run_1f1b_loop_with_overlap`` will be called and +``internlm.core.communication.AsynCommunicator`` will be created for managing async communication. Asynchronous communication will be enabled in 1F1B stage to make full +use of uplink/downlink bandwidth and achieve communication overlap. + +The difference between 1F1B stage without overlap and 1F1B stage with overlap is shown as follows: + +The 1F1B stage without overlap consists of the following steps: + +.. code-block:: bash + + 1. Perform the forward pass. + 2. Perform the backward pass. + 3. Send the forward output of this iteration to the next stage, and send the backward output of this iteration to the previous stage, and receive the forward and backward inputs for the next iteration. + +The 1F1B stage with overlap consists of the following steps: + +.. code-block:: bash + + 1. Perform the forward pass. + 2. Check if the backward input is ready. + 3. Send the forward output and receive the forward input for the next iteration. + 4. Perform the backward pass. + 5. Check if the forward input is ready. + 6. Send the backward output and receive the backward input for the next iteration. + Sequence Parallel ----------------- +Sequence parallel is a technique to reduce activation memory in layer norm and dropout without additional computation, communication or memory overhead. +The implementation of sequence parallel for InternLM is based on `flash attention `_. + +To enable sequence parallel, you need to set ``parallel.sequence_parallel = True`` in the config file. + +.. figure:: ../../imgs/sequence_parallel.png + :scale: 50% + :class: with-border + + Sequence parallel, adopted from flash-attention Data Parallel ----------------- +InternLM supports data parallel. For data parallel: + +`Data parallel size = Total number of GPUs / Pipeline parallel size / Tensor parallel size` ZeRO1.5 ------------------ \ No newline at end of file +----------------- + +The implementation of ZeRO1.5 uses the concept of hierarchical sharding via config value ``parallel.zero1``, which enables sharding within local nodes. + +1. If ``parallel.zero1 <= 0``, the size of the zero process group is equal to the size of the dp process group, so parameters will be divided within the range of dp. +2. If ``parallel.zero1 == 1``, zero is not used, and all dp groups retain the full amount of model parameters. +3. If ``parallel.zero1 > 1`` and ``parallel.zero1 <= dp world size``, the world size of zero is a subset of dp world size. 
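A quick worked example of these bounds may help; the GPU count and parallel sizes below are illustrative, not taken from this documentation:

```python
# Worked example: data parallel size = total GPUs / pipeline size / tensor size,
# and zero1 should satisfy 1 < zero1 <= data parallel world size to shard within dp.
total_gpus = 64
parallel = dict(zero1=8, tensor=4, pipeline=dict(size=2, interleaved_overlap=True), sequence_parallel=False)

dp_size = total_gpus // parallel["pipeline"]["size"] // parallel["tensor"]  # 64 / 2 / 4 = 8
assert 1 < parallel["zero1"] <= dp_size  # optimizer states sharded across 8 of the 8 dp ranks
print(dp_size)  # 8
```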
For smaller models, it is usually a better choice to split the parameters within nodes with a setting ``parallel.zero1 <= 8``. + +Furthermore, you can enable communication-computation overlap, set bucket reduce size, gradient clipping parameters in the config file. + +.. code-block:: python + + hybrid_zero_optimizer = dict( + # Enable low_level_optimzer overlap_communication + overlap_sync_grad=True, + overlap_sync_param=True, + # bucket size for nccl communication params + reduce_bucket_size=512 * 1024 * 1024, + # grad clipping + clip_grad_norm=1.0, + ) + +There are two communication optimizations worth paying attention to here: + +- overlap_sync_grad: If set True, overlapping training backward pass with gradients' all-reduce communication +- overlap_sync_param: If set True, overlapping parameters' broadcast communication with next step's forward pass + +.. autoclass:: internlm.solver.optimizer.hybrid_zero_optim.HybridZeroOptimizer + :members: diff --git a/doc/code-docs/source/profiler.rst b/doc/code-docs/source/profiler.rst index c10f425..aed4d0c 100644 --- a/doc/code-docs/source/profiler.rst +++ b/doc/code-docs/source/profiler.rst @@ -1,11 +1,29 @@ Profiler ======== -.. 可介绍torch profiler, memory profiler的使用 +.. Mainly about the usage of torch profiler and memory profiler Torch Profiler ----------------- +InternLM uses ``internlm.train.initialize_llm_profile()`` to profile performance data, execution time duration and breakdown analysis of +step time. The implementation is based on `torch.profiler `_ and output tracing files can +be visualized with `tensorboard `_. +To use this torch profiler tool, you need to enable profiling by passing the ``--profiling`` flag when starting training. After torch +profiling is completed, you can find the profiling results in the ``{JOB_NAME}/{start_time}/traces/rank{}_dp{}_tp{}_pp{}`` folder. + +.. autofunction:: internlm.train.initialize_llm_profile Memory Profiler ------------------ \ No newline at end of file +----------------- + +InternLM provides a practical solution ``internlm.utils.simple_memory_profiler.SimpleMemoryProfiler`` to monitor actual GPU memory usage. +In the implmentation, model data (including model parameters, model gradients, and optimizer states) and non-model data +(including activations) are calculated. + +To use this memory profiler tool, you need to enable profiling by passing the ``--profiling`` flag when starting training. After memory +profiling is completed, you can find the profiling results (including logs of memory usage at different time point and sunburst charts +showing overall memory usage) for a specific rank device in the ``memory_trace/rank{}_dp{}_tp{}`` folder. + +.. autoclass:: internlm.utils.simple_memory_profiler.SimpleMemoryProfiler + :members: diff --git a/doc/code-docs/source/qa.rst b/doc/code-docs/source/qa.rst new file mode 100644 index 0000000..2c0a7b5 --- /dev/null +++ b/doc/code-docs/source/qa.rst @@ -0,0 +1,2 @@ +Q&A +=== \ No newline at end of file diff --git a/doc/code-docs/source/training.rst b/doc/code-docs/source/training.rst index e9ee124..f8e90dd 100644 --- a/doc/code-docs/source/training.rst +++ b/doc/code-docs/source/training.rst @@ -1,2 +1,10 @@ Training API -============ \ No newline at end of file +============ + +InternLM training API is managed in ``internlm.core.trainer.Trainer``. After defining the training engine and runtime scheduler, +we can call training API to perform training, evaluation, zero gradients and parameter update steps. 
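A schematic loop over those operations might look as follows; the method names (`train`, `zero_grad`, `execute_schedule`, `step`) are assumptions based on this description and the trainer wrapper it refers to, not verbatim API guarantees.

```python
# Schematic loop over the operations described above; method names are assumptions,
# and `trainer`/`train_dataloader` are expected to come from the initialization APIs.
def run_schematic_training(trainer, train_dataloader):
    trainer.train()             # switch the engine to training mode
    last_loss = None
    for batch in train_dataloader:
        trainer.zero_grad()     # clear gradients from the previous step
        _, _, last_loss = trainer.execute_schedule(batch, forward_only=False, return_loss=True)
        trainer.step()          # apply the parameter update
    return last_loss
```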
+
+For detailed usage, please refer to the Trainer API documentation and examples.
+
+.. autoclass:: internlm.core.trainer.Trainer
+    :members:
\ No newline at end of file
diff --git a/doc/code-docs/source/usage.md b/doc/code-docs/source/usage.md
new file mode 100644
index 0000000..3146774
--- /dev/null
+++ b/doc/code-docs/source/usage.md
@@ -0,0 +1,4 @@
+```{include} ../../en/usage.md
+:relative-docs: docs/
+:relative-images:
+```
\ No newline at end of file
diff --git a/doc/en/install.md b/doc/en/install.md
index 591cb5d..c216293 100644
--- a/doc/en/install.md
+++ b/doc/en/install.md
@@ -1,4 +1,4 @@
-## InternLM Installation
+## Installation
 
 ### Environment Preparation
 The required packages and corresponding version are shown as follows:
diff --git a/doc/en/usage.md b/doc/en/usage.md
index e286edc..45bb8be 100644
--- a/doc/en/usage.md
+++ b/doc/en/usage.md
@@ -1,4 +1,4 @@
-## Pre-training and Fine-tuning Tutorial for InternLM
+## Quickstart Guide for Pre-training and Fine-tuning
 
 To start a demo model training, you need to prepare three things: **installation**, **dataset preparation**, and **model training configuration**. In this guide, we will first cover the steps for dataset preparation and then briefly describe the model training configuration.
 
@@ -93,10 +93,7 @@ data = dict(
 )
 ```
-
-
-
-
+![pack_into_one](../imgs/pack_into_one.png)
 
 Currently, it supports passing the dataset file path `train_folder`, and the file format is required to be as follows:
 
diff --git a/doc/imgs/pipeline_schedule.png b/doc/imgs/pipeline_schedule.png
new file mode 100644
index 0000000000000000000000000000000000000000..64398cefe9bc50dd87ff2ecc13e99b7a808ed3ae
GIT binary patch
literal 257725
(base85-encoded binary image data omitted)
zbH@j}IvdoG4Y^QbHNQO_;pnObQOgOkLs(G3T5HWMuY^rVHYw4Q6J8SU5)C1Wq&dn`mOPhb9k4wL*`&9xu^fl23N z!17sa^2o!dKkr?DY4SiefrqJ+7%65r?xSKTdF0SYLY$GG?Ed4DiAWS&co~wdtvjf6 zd|$kz0}9dSq76TVC>9c~JyQo&OFQmoVCzYw>5m!39k2!*Ouo2>6y-Cn)%iis(xg1u zPa05Dm{7Z1UtEt;1{)wsa?tNG>TSt)c2m%$iW@ZNc91chU1F8ZV%eY|k$;4n%_=P`B95PgQig zG1E(Ip2FTc^P$*cM%9Rua-Y29rV@od-3pJ!N%2|a)(8{>|#Cj zAg=CW{ocFQst-Cr+nP^=`S}vP!$-BmQq3(UtH!@?7-IaG|+NTa03!Oj@E+*cV^xAd4peMyu?~o zNbao>8Er0tdwMPsHT9=_#&OvLc9@lz2MNd!dwArH`Wj?%QgJyU#F4m|G+()8x2+>E zI^+h)G_LJ9KUIG1F}-ReKFz*~FAaoarHp%r&waBi%CUOUmT=Tzy_BpoaE+HWG2q3B zxL&tw^lK_|=&Xp$>D|D44zwt6i+zZQ#ZYc!Fg-&{67=PPAPIq@#h zs&xv>hbAG9L9Jv{G+$R4!qT|6elEVj&qH;J0F0@TP^Z{r=%INf7K_guVb*?OmoG%V zP+Wo4uIHBJSw_mNg%U^Z&*tIkH;*xr6l z;a4|b)n+%35$4facf;_#gSCyMl?w>bTm`UsX7i%Y8<{Z0)i@Hn#`wfW5c~vG$l4pK%n9XBY6n#eBP!4QUhgpVW~>Q5@M2znl&_0i-HN?N7>w7fSelF?d*=Gg zmKtfFnundog&A^AWK89KOL;XDAlO-UDVh32AS^M44d3RnAl2X-1R4RPcO3#C+ z$iKxWJ{*G1g-*+|p6B2u;vgrAxHUQh8t7Q4-|>=JlL1fK=Mz~B6(M8r6Pu6+tbcFBJ&&c{>W6-ot%6-3hKTtyZY=Zj(a0;3?-DnL@42`4iCy^=nM;4MWu(c%3EHq*O1`25 zP`%v!xKjgE3W6ZsWlk)=v`%tg(wgx^%>D6!YWYq^veD zl7$n$p#ivZGhYocD!0I(Ma3c?8bDFaTMQNnM< z3JwL6@2+wQ`G&cjGhE8THCg|+CrutMc!Z1Bs3MJaqc#?DHeEv;1Jrhg{Ml_CVJ&$r zIsI7`(^F?MT@(AkBk{C#0>rTQ7kg_ntd7pp2E5x*Ez4c1=SGAo=(LSA1^AJg#OqD! zpXSjlwcIH~E@M|2i6RLe;K#)LD9RnPu^491bYd@IPJFa@Ql(xKyWcAHJbaPeurh!& zqI7;um(uo8P2W=PbqGsa=38794BDq$UasE`o$cbfT9K9&YI;c%cC|8k+TO~=3(`!J z9=ekrd(r10pY|d``F=${LJD^e+IDZ=rn+aZYHL_atX`c}7a}FqvE=iqu$xzCp zmR^3ZB<^>ma1Fzjp~(Qheu2z&Xd;BGNhse1@z6i$6g426=MZ~g@El?^nHN*><}UBM z?^+obAJVVkX_Cm3KTwm{5IP}cX)v+&=Op*z?`n(V@#jYwufai*3iDrAaaWKHz*>;v zYIEC`5YjXEV%d3}Ggnok`s^vh2i2=YxzM1qD2mI71^L}ul2y0*i>N(}Sg~K~W-LuD zc7Wh?Hp%H09!OfU*_kn`dh-*I1x?~%Wu)JLAUt#m%C!7`th&c9Aq&9Qjk}cyAYB3KYaLPH*GDA*48IRW_-VG)@bAQqduhj_!K5 zmhBf&FnYg}yBPxsO`_u&!u9ab;<2O}SwjwK!qXS8F|;5QCkEPW+{JO{ z4Sb88{#2jkrj}3O~#nvikka4<^Ihc#k%&4+QQ~ZK8F{(M|S1LnR z2!x1*qIhquAsIawCoS=D#+s(LjxN><9qc#SRr#zV%=(n=yxq6V%lNsx7i{8H z48EAID2AZ=hkCmr{tBg(8m$yOVV@fy5bz#E~`ZdS>8ujnbSNeF+j zNf;-USWs?7UJA$F-xyvjP4$B3RLo&NHOBQ6-?@$I6bs!Ze|UKVE*-W@pQ&fAYOx=P zPkWmBVMxE!RYx_zD((lTqfAvrvl~X7KK0Z9gt*rpt$3-DiHCwtcq^aI)xq&fQ8WQX z!4j<E`J!>$Lx|8xs-2JXvK@B|zI@!|gk)B0RF}e{cg{eCkJ@+YZJ<0NM56PnEm@(%IYCui@_b(@Td+gcrj$lcK7#(v zl3>W%oh7354(Q89tT&*F8Y`MhloF}q^OGFP*0BHt(GY3KW3jvuImt32xxO#X#o6f7 zO-UV52y2R`LJpHVkIxqqvY1rNPu&>}n*t!(o?IDqq+`LTYo< z7Fi|G8Os?U-@Py`)Lu_kN(8-gk5xU(`T7(IO>iP0`cX>4cMLT%S$9yUU$`ltlmB43 z2~3~%6Q7$b|JdHEqJRT#ZC9OaX|`vgkjG6&j*X2>*p|rg?l?~GpI$${kEz^({tGW) z2dy;d%}8=;o=SNA@se^N838%PFA-DQ@RKcj#VcWjQMD+!$(Z>WA0X`4`3V)0i{dZ5 zzqS`P#YPaZduqxyylhk=suA$zU*Rl6R=rQe?e!wz1T#w9RO-#dP|_O{VdMTB6$dQO z7sp>7DdegM7RBNGl=LpD;G^X|I%5g2K3{whkS#2;eGVs5B#hk~WEbg%3fYm+X=UE_ zTdeEqX*SJV#(g|g9w*(9qm}8rp-F`L>?<@`Ldfy6yycWtbCjxr3sK6RiFNqJ0`5>d zNL^W=It%6sRf6d1iMX2-3an&e^jS*6gm!g-? 
zWNJNbBV@4=>WdB!Ix4Xjq09e~V8U+{wwF@ZGSE;xOpH5VyG612EjG@=5{q43xZ zx;_l{s5|Qah|>Ul+4GJ-*)~DG0tJoSr*K>2&X>_%s8s1jqb}eCz{corpLK=5aEMe| zE&8c4gTpD3Rr1tyetav`0jVGz&}J$bbQQG{!`4-0adWi^zoF~kawYDKK-}@vV3Kzb zK++U8djWzFBTS>u3WxcM&~@5M%-}NaWo)bByjA+*^MUo06;qVNV{t`~DED4IRhqix zl|Ggd2kXnRNW;$h+rS54c}|Fd4{qhrTHupAVcI6GA^Wt_@1Z_XO<{RZ&2vLWJ zAl34!mD)zT){Bv2;k&g^Z3zf;Tf+CQgO8`J5??N{{E!ctV&n$Z?skKtn9CA~*M{uh z)uc#CAf_hz=7tXeu&!9!`sW5aE~1{Kp*BP&^Jj|mQOOvK~r>f#~6%9UHc6cv|hpWNqK42%wCx@`b*dBWf1 z=bn$5h%YX`uNN^Plz&5-Uw5BlNBZM9lg`cy^XCxS!$%^%cF|KLbz(r@xF!r+rg@p5 z65@$}G#+(Hy^`esR4uZTfG!XEyp+MnaTinmhgZ*!x94KQ_aE({+!bapFL6%6RQ*Sg zR|J+Gj(8J|e)FuU!#k9OufhTmfi38!J%YnKmIBz;vCc+Xaqm}KpOto_4 zz9?3OD9VIXgZ^kfmQqf#FxY2%5!_aemBl8jP>-Y3L-)-vz`!$VcSETtKs64`SVKJS;*f$3Ys3u8m_g{mIj&1F zQ`gW7o0@!dipI#X^0L}$;(E#xZCt-^6 zKXX}I$Dc12Fg1r(3C`(=G6!X~Ogon9tTeVBWn}9Sy=gr!Z)4k*rZ;$uf#j!QhM~xi z&8E@X*YYah-Mb5#4*r?BCw~hxj!b`hm4v+JY3vg2eyoJqQ`V*0PpqAxMDmj(RGNZ1=r&MNy&az%0~+lD9+WOQn9SWKX52Eez^VizjZzJ1&v9~JMl z3`$=|@=&#hdL0WLJVQ*;b58%%SneILD)%{^hcnD9( zTj~gB4H_rKc@(L%B3CV{$x9$FoenSkn68+8m4P;&iGNgsV>{Q$5qwH16Tq4l z*zkZ*;>t=2f3XrK;=&d86S_NEyMN$h(-}XRt7SbUQ`DoA_GyHPOVMz>Wqq-#GQW#z zDeqh@-{TS~2PJ&w61`FuRd)K((!Bew&q)_S{<-?d;XJcj=WVipV=?VZRmdAu0M$8;0h3vF^>q(1CtNawq zEckg9rhhxlqw{oyi6S&Hk|En>WuB%Xrt^j6_m>_+EQvNN`(=}y>65WV4sz1uJk?m9 zw9h)Ah`UB!=y#KH)uvRc;^48<5ZxK;J{yf5UxYM;$c`d23|?i*U5LIC*t*6rh6N1W zg2W9tz0P=(;~7`vO|Oem@-w%c!$17v_V)4&jXue6tTJ5c(u~WO^~ne$VmqqcaR#qB zB)8~VQ54iYC?{?9pr1#P4c6!X+&enuT$)?|dRVfZ&A`%%@l|5ihr1j~$HjpiAM~}! z#mQ?pi|GEbrTSY7nmc^nNvT;iB;2ft+NqdETJUiG2mMK55Ci1Ns9^<7u`UV6iPNRB zujKDecn%q}Gp6M`<_7D}yQ;B1tg_#E%XD0Vg?izK7>k0fX9QqGqt&!Tr4y zVR1d0gt8SUCag>@J#AnY_ynXp)~gXYbyarJ{}(w#J#R<9lEcpiyxK;xN<>>qd423z zodse>(AGaHXU--U3O?7LGxO}5B0od-v{vZjRLK2U!FPd?9L+nj;(%95p{ z`}mjrK;Ia@&H7;3+9Zr9i4uMHXk!PK(m=a-aHfqvyWu0Os;ti@!m1@k3ev}Zcnlo} z)c&sL$rcN=;QUeI9to#o;lSV{SY>? 
zi5H!@e2_8NzqIX018b@;Ux%t6HuTSk&8p2Wvf*VJ3$r#R2YNT@tBN*0C7~c3t5w}( zrLF3XKb7uV3?24;r8$h0D_h&zuKz-E*svM;u2;Evg{QiCp_!r`R6kTXXu5u0=Ho#_ zlF(<#M}0%CkigS}buHcR>k3GCjZt%M3Q(?1eqQ^q!tNU8+-I(i^|y33$Y&(K^>)+) z`N{qC2e(1M1i?fqH^KG%C9VYv-Tit4Q=^KFLBk$+G-l1p4ujSps?s_U9;kLipA-1b zigds{y`Lx1*f;T=aB6jURMMYKPJkJn#LOUcs*>`k7`5jE_<}8i?)YO-3(_f@(^4JK z1`F$kg16;N^M(%M+Y;nrs6<}2%>5gWWEUM7+D68 zKRaFYd-kH)6`$@f(P+BZiy@_)TOeY4g@wb$74a+xIDvC}FYp;%gC~P?^1G?iEhuh< zF<;o+jc$Fj(NZqe{Z7~CD)yGnOh!|0P3U*JU(V_=)poY2NDv&<-YN=m1Gsc*>h=lRV$JV8PgbQZ-E>q2+5DfkAQWKG&M%t> zlJ@4G)2yB7sg8rIBU9y$; zhVIo%_|?cRx_B-9!X@7YZ*0#R^0o1A`Cl}l;8KDEbaHaAq+{`k?+*~RtiEN_@3@Ca zdR;(T7N0g_`iD{j%Ohk73|3i&!E%ee_W=Z+Xr9-Q#r^S_z7p{QKQaJpn~GWZy1T)- zi(+5iCCvht%O4-ZFeT(!>$kPO4J2(ODd!rF#BftijhMAxw-c4- zv*yz37@AwPyuFH=!wH!8USKPdC)n4>))3F(F)KX;@$MpZ&lwEb;kA0k(N2eLCu3)v ziLUOGBBs^_(R(bG7k2%L;shZ))|vB4sZC+LA&B>~tryEEnx+oD^Bq0?KL#?k^1eU; zTYeS+r6TU*(fVY|;562?Ies?Rr6|#pubzFe)@yd8xEZnuVQwVzTUa#BgL%{ac{e{_ zg6aehfCY8E1ra<4z9wNhWy%i3cKkMEE+iBMH_D_ZPJnsBr);j;+jQI7%s^6t;L%Z1J6s#$w^Y?y3O)s*sM@S#q6Rv|pHg_W;e(eRRR zs-^6fJzj;3IYDE!j*+|xS#cuyx~oS_;G@RQr7qhvhrR90&#A~R{PS@;!f><9c5_<0 ze%Jk_z5w#mNfOn3yhNZSnvV5yyCmF|wU=()zMdXRBf{KXZn(OAZb_f|Ygd)K>fy=*5JGdgEP`52Q*Pf2$Uv{op;ShiR zxQwW=nsZY%Oud!y*DhDZNazj+?|li*hBxIVm2mN<)2Q^xrMgX>z{Duy1!D6JwgVd$ zSW9K~djlV~u`J9Cy2-1USrCzAlDUMsp>HRD;Lz;|5T2|tLcU4wtutzToAIBkS9dI|^Bgx;y2lEM|D3$KO&5W>?SJ9>xbUHhvYZ%MsyMHwC)>Ir?caV zA{*A(6^PUa^o>}9AyG+sO}`y%0cTG&g9bqP45Og!`Nd{pe3ojI$@;8LM+g zQ^CPSrK2(EfZHA=4>DvVXQD(`db+E-%I*?3vsh{!pB3wwNHQ<$AZM%2K zA~4EkaHFh!BbI>gaLH93@41n83p2OgYH&1p z*T*)rbV9T-<1QxoDU$?l;{~Ya!PK10%_xIF*uDHl&Znt6Aw$NJ5Fbe?VtI(PD~&6r zz;(`+dcz)n1k3c!oet=L>A=$vRQ-pfBf;nX_AZS+6fo}jO#l??STmd-5uj3xa9=jz z7|b&SH}S6G8F`naaoXkV$!LK*ls09t<#60lWu{@@n3t>?%cWNxTQm7o-ErSGM7?#i zb!9}k-gv02>s@8Kf@DM|+Zwr*iFiUdXCf}`S;gdr#-V`N62guwIU3QECrD_}LJ22X z(=VG&ozG9h)*QjgtDHVKw&O#@B2O?;i()jq649fBw#cIR@rJ(q?zc)boi7o7;b<76 zPG&Q-X@LEv*p%3X9; zLQgbYC$dC)6Jy0qa*)O`cFRvhDb~Y1Zv+5PuAg+U@U)I_uIv(#O4Y)vmURLzwhKr~Q< zfJ}_}@s}%YH!2Q~M~&6H)#nSc=rsAf;Upe}mXCc~jQ<^75L&$U_*Glgez1@#* zNenkJ$pZvGbF_SZ)o7y+4#yj*;Bue`;(A~16R9Hp5*fpGJ554cuUw}u&fUAWLIzsD zb^f-sY>8d!w}~Z4`z=@0ir{hzfO`tc65Z3(Gf}J&t#CnI&c9H~KZR!o=P>pL#WS~Z zK!h@Wp@Q6N*pSHa zW(R+JW65H{h_=PKx1XdjA@ml5;E`Hnqodwg*P6X5?gKJnrVQnTk5&B8c5!h4k+J3Z zA@mC@XL1t~D~N6j>cYwSFrx5>jGsdi9~&b{Ezhns70|5G9c|TqwZCp4I{y}=a<=S5 zf*2ktepswMyf10tj|8l#sX=`?fnT<*T8sJ&B;V9=vqozJm3&A>vxFhE7?@TMwnV+^ zGd6{cZg#7ZTX2i|z$_LbQS@xWaiMVMS<}c;2J-|>ehvLSDUFZIygbPf9a(TM*?@3Y zQ;vrW0cNyiqOT##aFUveu$X7OMme!Q>BO&!a%XcP>BDUrF8}Sv_aKR8l?7qYrAlrihZAbmNYX@vdbGABRjkBf_@y+gw{$Up317LM6V-M zfxNf!REOe66msh=TZI{f;*t#wElrD8FqS12G_NW_k>2c)bQ@_RcHrE?kzeK%h3hGY zX?OCw8yS7oG>Gk7Wj6?tPurfiv8h;^2)jq)C?YibTVT4~h1oiL zsdJHoJTw9bY+P_NW!JDt+Sfw@9|xBCG}u&*^Z(f!!*kI1>u)84j2G3qZBs^jGfWEKQrV|r|1UL_?4|oO#fh@jE==n->;SFCHJ5(!GwTf1K zqKb^lji!v>EPPQs=(O&5by&JMH=IST^c7v#`4naC4k|pe^?80?cgLC+R~Wds;hHHk zSPyq}w25?(Nt|HslkyuX3)7gOsoT!NihtVb=wwjJ!AYOYVdx>Tyr`ulY zt+vP6rq=CjQS+Zb#42}lsjz8<$>d~aNx3U9@J0tq6=i!McE63S zT^=3-eqdUj_u+J}Ud!#?%!5)793-@KCq9Qg(@Hs-%QKlhk{NMX+d}|Q6^HL#M`t|K zhtELzr$14Vx(E{uWOWlI#9LN;3CD1mQL?h6OC{`U0+MSJ8qR%HWjYyuW_;e=mf5Ia za9gPOdP^(oEzX#nM}&vI)Wnz0r{lJHD{=-RKg(1amK%RoUxE@UP^bbjxrF@-WlrYd zwh;c@0{QCqKZ^BwLzEvVqP*ny$$+tl0;Bo;Na0^Q)E8Wd--fBq4TQh=+-D)nfkx|m ziHWw#ZmE|&r;9v)yJL8~N#PcMZ|%>;-wEI~Mq={!13+}wv$#J~B`+pl8k};Mmeqh( zYBD@9j39Wy%`4@>x^+wICFe8;KcTpoFNGg-eSk|&Hz5ei$_Wy=>|V#Ey%k^GnfEwQ z_lGj!A;M|IC(4Hyv<_j5k6!4th<1vlAbqQ~^w<^?-^bjw;c=cGmvx2*lyDh*4(;7C z7;@xixd0AHxE_>Z>W_7)KER<$&%N7;P|dZ*lAPHu?H8Gq+Noe$cVMvMpbaDE&mCBT 
zjEIm|fK5ZAE8g&UH`s$ZhPOfA)tB-x`Hy(>YDJApR+IiEeGQ=)Ni&B+7bnm8)$A!% zCER(}(IB`Wf<(9f75D$J_tsHat=qr&iwGzph_na@NH>UdNU9(qprVwNAOg}Q-7SKE zA}Ni6gh&cVNQab?N;guHFWkA%?b&DFbI$kN@w>n8IOC4*AA1Zp=)2at)-&fbpE*CV z(or>SC_+nYW`q0sQrhsj%KVONn7Dqip_enU983&EfQemOq-~A`YYN2Coih+?_SQ`MvpZ43{(G(2XT(cQT*OZS^D4F+qu@IF9auvCctzimc{v=L zIH~RtIcer2-iPr;yMx}vcL$pWH21eVEoJ%^Vj`UQN?h4nZm_Y=*GWHiuqU8vxmDkk z|DvEY59yedD%U4+%0zdaQP-jT;sY_d@eDRK0o5ZHa+}n}DFDayo$D!1N5w70=u06l zx?+&O0kEnCl~TJaR^L9jTPJeI6KXWz!x*hQ+NTJ=qUkC+H_GC%1n>D>0srx;UQ-za z>iIm+LuHd>U~7Faal$r{^>#(6l!Jul?_*)A4+d5%fW95ysI0q50*u&L$lAcSIoWN) z6)R|_k1t0;6XUP7W)|`@;aRh5J4x?K4hYdLpHVL-?p~1SrKtb}9wZ2)Rr9hVn-@X- zx1iRkR2fxiuK&68G?uhVLBLT*67K&9_`JK_{;-4X47d(FrKjiAmbkfpFrFuVzF$E4 zF&;Cy>kX`Qy!2)i-zFn;bYYLI8>>$a_KSvot^h47Vx1OiZSlrbZQV1S2mh?#DYb;> ze)on-tjc&-LRcFBQNW^A@+Macm^dFOHtG1o@^9W9!hv4X$7=wO(P^0|!VAz&e8fea zMMah7^0b(u=(sK-V0O79oaZW$8#Tp__SPhguxk8|(abd@2+ z%hC=G2(?Tu^;PjNI(TaX<_}I3yr6pHu4ECkI;kY#ARt%FJ5`jqXl9w(3p4K5GJ3W#XG^6R19Ea(82oqmt*0@Q<6OVXgnpbT zXwc4%*kGuAh5D`)YHaPoeXoJ?q@1U<(oqyWM5O5;hb^^7exbmuH7?$w+v0{m3$Lug zjm}8`(iJ_bEG8B`dRnJyJZ`7R9%mL#YK3DKWWHKcg7|9pzD#LIE9iT5qiXw?cGV?%HAk;%r_@B!kXhS z=F5Ow@xW2du$7n>_lDVx1a<=)rjG@alwD+2ejH0O(KQc$I9VJURuH>Bg#oCghj{to zu+c?w5%{L(4X=H%=S{=|*cbm%LAmLqRa_EBmxyL2X=KzQ(yA+_z0@(wmoXkTq>lqF z*+jRK*GfZK95Y2{#EK)?y)zGQ#}YDp`y1tlJFweLM;fT-VCQXju4G&_Jwt zxa%s=#Oq6472b+@2ynE|e+iu4`U;HtEb3W#%%Bvfj_3O83pF5j@lIycN_)aK^wxfT zO($Q0?{XzkVVI_up(KT9>CFY>C-OtsPi1O&x&~n8C&e=n;R zrsBu6dp^rvYAWfp`lT$QOn8G@KDtY4_!ZZHqe{s0{7Z*B8sK(=9w_X-X}gtC?0TcK zk5Czv`FW1&$_jR*fi-SO?n{K_6#=^TAp#c47ZM3MS?ZFq?m%=#lDVI6sTNGr4nrIN zqDF9FNXBzbAC#YKV^*i&$m~9eAsKNUdaY*a2ueJryI}GqeL?FxdAD8;hZc|P#d9ws zeiT!E_Fy+4TcEL1^D+AxRqs+)l;&b!ecPhR<=s%;hx%q&BtVHRUoS9Xx4CQVD@+2W z{>&LG0y#WZ%fp?~ajw+aS}6T1&lE90q$7vE~P~wBhTyezBAI(pjgp4StPo8%Sm&pSY){yxiznM`3L(S^{$#Pyb6|L$cf1rh$sypA?e`w!ch9X1G{KKPf1|#!Vrbq;at>TB; zr5TnO)|wvbKPRse?~!wAx6Qs9jiC|en~UX$8QmNQP16f0KXCN9Tj6<$03Mn9L#Ba{ zrgO;(Ogtca2Nb2$Iims8_I+KlNZW(Z1Y zI_;~Wa?W38EO~^65@UOdTBSg3;S#0VfQOW4)^o1X3m4jM_U}j93GwYjYWxLz@a#77 zvy8Z%NK!?Sl%fE9Y29-zv9y=5quMR~brdlGUk$yxjq4}V*;D?m1H+TYBm~7IaHt)N zHC6&hyNqs`0pbGUXqNkpW{J<^ZfrH9>ZU5U7$a^Y!ow~MY=1J5jMr>8JS7hB+CQoKwMZyB3@X}0e9(W9xvfxViYW8-uw1F#~wVQ zR6UXV?ATt8_L3h4)_hJeqcgKHS&#BCaaIH^_gnpnE-q4Y(ersR%>EGxJ=R*w3*sft zZ*DbqS{sa*HHpR+zjmL9?Qtum0}RbA-9hT4cnhPgiC!Kj1nM8btG#D)!pp6y4=jV1bK5Ho%?&vx$=I zcg8F|nZ{c;%ZC8xdNeH2{e-r3Si+VPK90iE!eHH_sQO&x;^ty%aks~<-qu{iQAVf6 zr|=nCVt+MA*(kgjH=^elN4t2Eezv>W%Xw#QJ;=_)(0wQMJs< z>dy=_%^05YNhi$Pv@DV->Upqy|14I3*UAgAs}z<-iszXiCVxXyx9O^H6R$mB4&K%l z;*i~|e zzW`50pCq;+l=Wwo`ij?Lgu9eWFxOu0;Z+~NkqSD9U)X@5iRR!BOZJv?-vu-pkYPnY zhQc>rdVc4n&e=hO)e?y-sisdY#kKJgOQpl=wz|k98eR4Q6cP-F_a~DWX%Y-6s+$Zc zn_Jfml{_ytl>65OREv#_FyFA)xxKKzP%@;fXc?5{OH}#BSGa9P`F7N^y&$cdh#i^2QD>;8g0mDh0~dG0n>+5UjnH2wc-7-fs!4rI zEF2|gapNV2TKCr2FEK5?v<;>Z;Q; zwMk%W0qOKsiOe30Sk7A(Do)zE>OiqZ-fAvHZX%!rd}Hi3z8$EOw-JJPcjkjwH8=29 z`%XIJQFGpAsgA#T7&cC{Wq6|@=1QBs5g4(cVh*wk_8;NvrqV#7(C-u_Vr6@i)D#yFG@nB z+k|9Y#%Y!@ay0?VcCK%_j5mDfM%Tv;we~tpqo29c%M{&XRbPuTSK_%lVh@rN$vER3 z>MoaJZ0<|eNOvs1mpC_goakzSz{(G{<9gp!#QB!7g3ven3QPgr|!N{sr9=f zi&X7wlBoxa;g6@@Zr5%YQZPZ=_|XsZ%XezjMH%Ktm7_H)h1NfixAG|qxJR)YActv{ zvK|G+&9{j-=WqhT%&_7d94GMvRNWL|YAk}!7JzwI-pZh+MJ z?xs$;IqK8&#o4o0v$i(EyHiZ_W-`lft7K51k0*8HQg!`E3&S*MKL06^wCX_HkW~p+ z@#AH&Q&RCwc+8&#qG~aF6`x6!Ij7jycPDb=(x*4pEk{`$5E~`t(!S*wF9W?XDgKXv zrDjsnb84Q1F?Wvu?x*#I{Oth(9im)*;!c%NqfYE!5>vO`cNWJsY81)sSdb)il-pIW zErzwR>{7QyZHWY{2Uo8J$lO6rAf$b zRKSx5VCZpv+>qFvghrdi&kX+Z}04UfK^1m^!j*WT&>Y#S+Hz(U7j;ixh%aB$>>y3tXOMhL%&&EibPMVFltEM2KdF_ng>3o19HI@M@Gci-0sX&=L 
zLPmsyfchLM7g433-MobCJBtcoB2PPY*U$0>;t42~sunsHQd(}@u)e17ET!*qk+>DI zsoG@Igd5MsAMH+VK6cz*Th5S?4n3ZBf3VcLhv=xq{g4Kh-_xn9Cr|{s6&6{9c0Yg_ z#V+mnt4i|vLA_6V9yd|QMaPypU&-NheVngbaD$lTpw}jNC@(7@HS^s4yfU=6Q{A#TKf5 zr02*~Ijj?4SZNg)6PS|;v$34uk3(CGe(Ng&nVc|8nGuoY@Uv#*V$4KNqUhsaPrAGnU-Hy(buBtOd&-Z1 ze{8(Bf8tht51+wohEv9nd< z)vy~k?^OX`tJ@|$vqgFQW}A9EPBt_=wq0f^0Nc>D=eZ#Nocb|Rmj z7P`5VWAw~XGNUWk-|oena;HJUgPk`o@SOk$T*hKEI#?ER!R-$1GYq58sTu{tCMOyr z_+prE7kDi==uNoZr{X%vY#Fu$&BVIwHU+wFsd2&eGrc-{o4T1D+cis@`hC4)hrNI{ERF=&O zT)u9Z#Gj*gj+BsOIfV@4n~Q)(?$~%80BAmumgYX~6pjYYyPb=f>qMLL(e!&YZetvT zBwLhY-d5U=MxcYISEBy*J;k|;nRZ<~$!=fo9#NH^!K`qy1}%c1JF6p9PK6ZeS;KV3 z!;W^0N9HqyK|K^T!mWy-ifu_%b3R3k4EQH@t)N{o<~rq=bBv%<Zr~q(4;}BH6?yy@jrFS(Tb|?eG|?iv3{PF zHbV8B%^onp9+J7faUK)lclv!Rf9qw6Xj;y{K0}3RSS6a_Xtv`#>|C-hol(>SS1}cY1sA7{U)G#C!peKk@WQds#80nuZp?kMiv62o> zivrhZS1J4L^d&tT@A5y?`jzrdP_OHhoT_xMrJOzWs3&gi*q-)FbEm(N$xA&E$XiY4 zaR12J#ZiMW$8kHV?k2qF_uS7DKz^&ASGz6W%Kf!0E-x&W1?6)~#3s?~l9jf!rrcW1 zEGkIRc5}*IN!^7Arsn=3GAQ59Qk}9cfcdlhLQD`Bz8hV}iIhG8Zv^~i?CCV!NZ>!Q z|Hx+R1VS<)S2>r!HOQ^JM03Pumqy4<1r!rB>P&nDVWOx&-vne5?4cYrPyaoBoizCh zti#-;c-Oo77&EuCiB0V4RJWy6_wKh5EG|XUHK?4f(NIUvXG&5rapi^sHAlH!<*$UR=2Q;r?ek&P65dA=UgPI%GG zbh(OzTT|U^-zT=bBSG`F0v#qUp40sSGZ?%~l@Ls{sH$sn=$ui}yWo^rR~b>jZ8-X5 z**Rh!EDWg8yIp3d?9DYyr5gETJVHv3Z1 zjPYwcY4yNJ+-WAaQmwDLr_|!MlE>tZ&vks5O~XR_iI(oS!#wkR3yy%C`yR3)XW+1Vdmh;cU2G^_Od3QMx6w$Tk#A@~j z`!nhc#$yX#3azO)OyYuiL>nyp^Ql1MF`Njq`LSsi3rnjX1yVG!#4`0eUa21BY#BLV zZJ2#sJx!$!L}J=RidH0zJjv=##Wa0bx?TK;C+xOtitsp;V#C0@`T7(ihfZL3pgQ!R zn&r+oep=iNpZj~XX#Q^ZvO#+q7ydr`^Ga&>&v$+j&Ne`dw>rjOimKx!(|!Pj9iXGT zvrtJ4pp=3d`lB<}~5EqTp4&UWxbFonLb+Ehr2?IfN42S0(PjaME!A-lQNZKH^))^?=_ zMcUPPGJ{s8Lx8Py&<)huCyx}8&nr_*8h%G6<8e{9uLGd=+$S@C*?up7EX7H6W5r(h>t=Y!r{A9xabR2x7E=lRTrjI?um*WchwvUpLjoa#%k0%Jixh4 zhX}-P8CqCDrT6|Eb&vnI@1af)mG?g+UmRIWvJjd}#bdq|=Efj5;4ALO!r3-w7`5*s zkdjZGj;e3u8h9af$Ni%~!;E#*{srSD3#SFb$++$^{n6v9979`Ucj&0p6e1InXJ4Nx z7vC1&BcoI)`kfufF=VjhjtNWwu|59B+fFhm8WQZ_@)J`e{I@k8UO18v0ky+ULiM93 zjY%eT5S6j$Z>LmWYdXa@kypp6mDsxR%ndJ1INw%Bl7iQdl%-lW_1wG`7Vhlcr#H3P zTB4j&vN`X`yRQPiZjpWK@)gY=$#h*5X8^1{mQMZbvio(l(M!R}t1H#QO1nPXNl8@% z-t|nn%Opf(A+~l`Cy>jB+!as8ySefm)*g)aEZG1 z_(yQ2twV3h`yW?tea_FOMRyTIV_RZZfgX4AUS&8bM~cmk7sx6GO(znx`R3hvdtalZ zs1!!LLXWl{Dt-a>E5{3dG7u|tQ>3ceJMG%S@HRX@?(PE+=zrmjpYkk@oh`Q1s zF_zy<{Yeq(hidf0CN)lmQ+!AV#AL1U#swZ2Q3^!Aw7WR;`5HHGIzxc#R>2KF37k+CF@7JC>nD@_t{=)Fa*7CN{VsUMa0>L}Z{|08!L1>*xu6R2>LW zMoP!eM5?-Si)6kpvvNw7S7@0e=jo4CtN!Z6%bjJijHAK?*bjxv4P+5eEA-T|zQmAg zfq5F;hJfYZotT&FOo}<)<5hZ#)4+@@l4BfKATzdA9FoY1aEN)ZbzLbY1(ZcqD>@S^ zGWi%bm=syk>7MPIAGO=5%p*~D_qvfYw!yRfae^q~`VNttM~#U?0hdHhd~9*sBVC=1 z(V~7P6NO3EzNmEyA?29vln@UB;!i-I>`AORx|^^d@Y*%pP)P9^3U#o4Mdp$^yl_SD zP}9Kg0uQ@Ao_vA^kVQG)g==i-S}904fjm4|2@q~;4cgHVMk>o(omHM8>tDR3`XiE3 zw}Oim*un*8eMdK#xPT^(AYS;5ntil%ZeNXNr*K#&BJstQ&4@rbirib9Y6E1vpw*ZU zxsbKtCvyq|&?GK1Cu6^*V%U)&wt4{iq_+}s8(;%i0i(ycX0X`3v$tcma2aDI+9WxEVLv`6BS&4CVssLLwaM;ejv30T~ynQjA54$xvR1aZHwqN`(kf2hPAMgbq9 z%l+vIMm_GO)2K|uqTm#2!OI@!Z^Goi_7;h64wykV0dlm0`!~0YZ|NBIY=|!OIDH1{y7+C49k`yP2<2I=yHVjzl z4DjvJefFb3{cds<8s$MvEW(@ATrCI|K|cDWQnQB?3=ny(=Wds_XHrnww!BRW$*wXx zXexW5jn3+ynJG0s8(7oobaajAh?;*)g(Ed+b2pqglqJ3n>2KKJ437aPMJ8Y7PWOkm zi7oaATP>r}a#YxtIuj4GDsKyazposM{l=W>@G7D-or@;-WRq|2Y$Xc4ZU408yTD}- z-r^s7*n*F4d?flt2?X&&xI@wMqnKQmDc1u3aQ<*Lkc1;waAOQtqJ^u6FafnFIVXw4 zR7WCB;SzY(vLWaf&ImznbEW;gl;5Pkeu?-Jpx+13n7R3E*nu zQ4*++G_@EE!M!;*KjLK!0-G4%+{nQV&wvt45zgA4S{y22_z(oT&5uUFqXRJ~`LprD zbr6=H!^;Ng^}q9<_}k0PJrrwAayb#mfL zH~S^NOED)v?RaKDx8MD9~o4K9$ZohmM`*x16>w7(C%Hs9$sDC#_caynq#=~?kGp_o81uN4|(Qs+~M9$+JGgo-7{l-;X6mTo74Ho&zPSEs$W7{|ILMV(m>p2o8aIjRJf{(! 
z9d5K{$3&!*%x6a~?~XeZ9MaIWHM;eCLI4sb+^w4r;QRxGz+zJ&GN=Tsqd6p z%%jXZWPhlggGjjbR4WTEU_xs#2}I9b!yhP+d;CtgvIU%h&smgszW-9XIpQJZL zx#-)I%xuL}WCSEE4Du~qwOOB8VnNMjyH>Zqnw%)N4A2_U>J|1~e z?$i;pck=hH2|6yki;Yl$1C*@GYxS?muAt~Kkx9|83c~xDW(O&uIQ6~#emg`@6D(h~ zWc|pH#cb{jWTRiK?U9E0B-!weGGSh&&<&`x?;BzVKlox3aHw5czY14GIJZRraoc6zD8b7*nqzKQ!?JA(1P^8i0l9m zT=)l(t%`bx`|m||K@z13_dpZCKiKBctDPMa$FNa=`&Nq|X z-c2mj?^)2cmLW3OcAQrAjpnPLh1WvqcWpGc1$-!e@5c9G7>uZL^Gu(r?lG8q&K38RTy#gD z39HS`$xMXmE;bY_C0Lv4aIrqF$ zQy`cGj4`$FxU5lBqPY!5n}aOL?4Hq26Wi<(Y%MP4QqBvSrZ^Q6k4;gQ;`V1jv5BLUgLcT|;UT zfJSDuBqI?A)>EVw5vL5MyXd9AiEMP|xwr7rP~^6l)_bJf`Wv6*O`~fszqObmzn`g7 z(WF)!a5NIyqdt{M2rE5M7)_57sw_t09=uYvvU2q zwFGf2-PG%MBc~7cDXrrMLqS#JXDunf-XN^ffM_S67Uz-pX;)2 z;dmFYkKPTgI>1M(+zE{mZ?N#O8vP@ymOT5XSrvUAb^eGQHWF=rcG(aE()0h(i=~iD zRav=(zEbvr)JT%&xjfj+K!F|97so7)o?_A*eO5RvTge&RXpP(R?DURWgW|IZ>yY#M ztt>rd4Xz(Xv*5sC!W84PF^grA!NK*uYGL=8+tEIV8OUj0BBQFrxdpVTNu!%XiQ>uMD?1B+i0@fAx zPdd9X=0c1anl-B_3~Vz4sgn4qH!*Qr<>$yv#bYAK^pVCyTS9jw-Ej0?KEub^Z_-eWa%N zj8}GHv`Tl6roqUD_R}s8{`Ieg`7SSM2>w(MYpuhZ`b)&^UW2nce5Jo^ch09!W1jXn z{|DJ|KeE_|gp0QMVtjgwY0WX>01g_eF63lW`isL1-U6@Y_uR?q6kH85qPe|<)ui0Z z==wFRo$Bw0>RSitNEecnY93A|E$W=gwlGT4%UK@*sg(cctTrOJeb8jMRg`k?7 zaR4Eo!;E#q+A;c!7kJ9A*T?hRgTLg62Y&0el*T?^ha6{&Fqtd^dpv*q5?T^K&jx!Z zn++`~SM_tq$6xA6JYyRgHuuT?>wOUMs!)A{a7IN{v%<6lT8uR9TD(&wO!Ls>7?>?6 zMIY{GzeBAVc7UVme&(X7MygmPb_bwR zzdB}F|9+tkW!&Jy_)rEs0pU#Y7e5^|BXm6eFT{ps+ag%rrKWWa;hxDUtr z3~rCNt8UTCIZdS#X%Yx~06{Bn=h>G=ocIeRDzV)#EXMFfz;V&E+Gd)Pynv% zycac&o3B5(5-ZAlC%KG0ayQ$|79s%n^63aqw%I2B)p)}l3$ntS?n=>ofbbO&7iW}+ z{NDku<4)>ni)XhrnT||H1Odvg$*5AUiY5zlQUA~zlP_&OL!jY3R|lbEVrJHJ$$aO? z36rbGLegKhCATNdDKok4D-@d!idSaIwfsOQFLUAv4ot#)6ED8xIw$6#kWZ0%F3B0f z^B^2uHVIW?QU88l9t`voj@fA~y{}R+q)?>1rThB@<>Z~*NY3uDAdFOFrpJ!mSjvc) z+;Ugh$S}(-zp~{1P2UJm>g4*W*lHI{WBwbh;d0aaJ)OP+&&*)eu_W(EcczIA=%#|Z z&&W6oCkUaH7uv*h1S#!&?tbr`?pX=t;c*IawASo-W8dJ{eCEsH_R|y0*-{ z9=%$7i7rUs*xO6sY%1k`yLdA8?6sq%h|X7Wpg=AS^LlLSx4T_~d;&9lvR|UOli0u5 z$_(913eXZTD{iU(y23UgJt&i;ec$z}`-dJ807kV(F1zojU6{#s{~X$}+%sFq8@`sL z5^7aeK}7{#c^7mJJ<6W!bVrAWHLe})ZnZ^l0eiLlxoh9;##+Cu@w%_q(eN;e_E4y! 
zkB$cA0wAN-PVJ<-x?-8_{xUV~1-#ee7A`&I#2D z#k9-s?;Nm{*}(>+iN2zcuINMbe|PhDwC)Ok^@O8kMq}g>*&zBlFw;Kl&fUXlqfQ;hnCoH>)Jh z-Ez`HK;cK5wJ6Qx}oVFTPyullhfblX}UV_-EUg( zF&S%%2s8ev8N#He0JhP;jUum{IO3t*xpeLX3UMX=ugxz{$GN2baO5#v(OH?!;=7B*PE3xT@D>{l zy!!fH#Jncx%aN<>?=xL+_WN@A8Vs2SKi`ic7{;!(TiPDt>0vsD z9v+QoUaHo0=HNbNonpAqoV3qNwLQ`u&2D* zPlsNE1}5$dzhU|X%B*76-1k$?EBd?dCoH*EFZCxjw2z#}C3X|##SNi@8fGkk>EC&< z@a({WlMJ=+X9vzJ&@!L`S5f$--oNu;rKT?>KOdI~#b2A0Zraf%MfBt%CyC+uPFU%~ zHIKK*NsK;JEKbyNVa;7I16A$&v&!5))gfMMnMxVHDVO7q1J&~8A66t;X%aOL{B2H8 zGo}Zzj+|TJhkic^I!mkD^pGt8Vp(HZWE*Nr^D304P!=jXp#wHTz+@|^x{ zv;E=Kz7LBd^pqLpacZWOfjB&DD(fzlXwc|G0*W*5AJAyZ{oxJycf}7dGH+o`6aF#; zKm0$q0q_9*^U1r25sWMsmEb{<{F{!IfB0c{2VvB+>cq?c6fQ2$h8T>F?QpAKai1O$a6ijSHj!qiKW1)VVdLKoBM=*5YPN3Y* zSDrxqOLt^2V1>SO``}-hz;H)@c%P?AIIRHP4D8PLW!}}`|D%<3>O6)AxJI6ein9Z( z#2=>Khch8?9%V;uMXYn89RIZ=>L9p}gNpNC#GH|50?+!;`+R?*9?@c!fHyUajXc8t z%_sNL&bdB=C)9N4-IPmH$+Z`ueXuP%ROA`U|5+gavp{}%D*sOg*+6;k;BOs=6o@46 z|3@;2daf4qTwiERy#JxpU$U^uCOh@uW>Xx;yT->XIrDa7DKY&0*9_Y^GuImKEJq9E zS$LGGFu6S*9(z7-arz5a$CastG1>Sp1=FX#SWgk&9JNg}1Iejt6D6y1s}*lgh$qhH zmMe{zlwr(C#e=8~_$#yBDgYJOOt zF;1%L34i%+|2IX=f*?@=tt%ECTzUru#x0k0UM^Z2B~Z!~w-i1ZTq6?X%9=0KUY#n} z?Q@dfk9%|t6hxXv`Lj;~$ft0!ncWf>eKmT4Li>QD<2gt9tC)cV-Rfz&juw$+>(HT_ zIt;wV&5{-!%7?x!t3!``^O_!MGJXk99?&Bz8eFa#-x6zyrYElwyCfK`S$L1uU3%}s zNvE|&gDb9^Xc<4n80;w^4r~X8kcAtyvlS{t##1J77R5C`WBB;=iy@Wqn;eBp?p``F z!)Bj-H<-$F@8PT;bQkXXtVIf2E_!wNgWWs8&ENF|Gq-)@gt|c9xi|59>DRU7+%9`4 z`F%$5=FXVIrg_VKm3~&+eGVj*x3X-KV&UO-d>pw z6Q}ddk=s$NU+NcyRmtXji269FOSZg2!cGEHmdq|iyhmYj!Fq$gKNwo{Po`gMf8Q?A zWcuR`9VbMMD$G^9Rp@&`W_k3)DDCLwhru6=R)ep1h)Kcg{;_#BN$K5Yx4E_M_`coe z;Eo~lH|`dR^MYYcxgFQwS$?fZ%}ujsxtX8-ipcW(*0Tpfg{Awu-uAk#ckr!U+-T(! z9)G{ISf!oYY7_eFg^c|u4O?SUC=QwUv3K;)NWCm!D-hIuId*x}DFODfi*`6$b??&l z;_(mp>$?tG+Jl)d605E7nhzl^np_=Y<-6B4{3#aZuImiN6zt1vS}u6(AH3}!Rh(Kl ztX22@M&D1%Rb+MtFPD;SPW<^#*L7aaogCn0uIpN>p~HIzsaE(Vp_FJ!;|JfiX8htAalMqhryA>La10qsq?( zZI-3ob1Jh(CB%)cG!0s2z8(=m1T6hFx{xK%;-?|wLM#UwoKU7+a8WpW!^!m*4& zlOHa5rvY+@W6l5I5s_QfS#G9f<0CMjiV`j(bl}qHXIf^p?~Z8_PFl*Py^ z%tzv&+6$j{3-z7;bJaS#y=wpUzb^>qoX4180y~YCQVC;#%L1(*srSl8kqDdF4+S*| zIQ9xaaLp?tH*h%ULoC3PhF?i+N3Q3Bu*a|F?WaT(;`<9Fv4CYChBysW=J07yj1w;C zI+K=X!YjOm(!;0@Pj!HM1hqr46(T6L*W75CKR$uW!lxrgis9vAID6P0G~oX{bHI1s zU_x%-SJhij)d|G6y(`ZBpy2bNCZZi+cC*ST#5?fv5sx^5dDNi=F?<>+YVX(&F0o+k zp0_=JUyeg0!KT&eRCOxK07jh-aHv75?Ij0fOD}UbK;9Nc{Ym(IXb;0L4qq=a88s1@ zSKdb=;V=U3fiK)T6Jmi*eIBE_EzZuP!yjx6;a=e=baXc+p`LT@_I)x^6!QFMwNc-} z{#;vm?cWyVgy#s?LF)C*awN=#tMcJ(K_!n3-yPiqHz4)v%wdI71ad&`PewLC z-1_0e)IuN@?hv8CK$YML>g!aSFi4VtCmGx;EGsXxLWe$?9mm_hKMX-!z_YR);W|2b zs{p_@a2@2VvpbBeU|9O_wva&xeE0ncxPjz%vt*qX$Z{NL+liF8vW(EO1qDB80-+jx z9GMK`TtPitGp729RfUHI=Zu>Dw0rr%Wzhq0AMpGyJCmU*W@=UXi6mfE2``g*S+gV0 z5w3%rme3Z+u?q4Mye+s82CP(fYq$Xf;$b2H%MoMI7~PbZ>VPPpojp6t4z{8DM^WJa z;C&ud;#7zarX3(|3l9sHmEOUp!q2kW3V%6@&IcyK3yI%6aY-l|E(@>ruvl?>hn#nI z1?=IMa3P2)NPlnxGv+&3MRBkkJ?snjCzL;xBeLNCFNJl^j@rv51)1fQxW*Sp%rCeQ zY&*VWK?vnz;HeYoYrF)n=9HxwU!5{9!(*_)&Dn{3 zCJb$+=|#N_7mFzoic}YCu3Re9Ywo;j&j6DW^sGbl@^qOd{9!bxC|ly$Hl*wB=lh1~ z!=|e4yn|dwy2A12f*iLbA!A>t#B~&vBQS3az|Wi`9u!DET12uz9p&73 z62*rpfhM071)?a22ikv;zxa04W*gM%-%t$JJA8DH85kJhay!?;(%B%h`Iw-Wob7q# zd&O4+tFHxJmLxQp_LZM%lxSy=?L<;wHa9Oa8J2=4IuS^HMejbIp-rVgnRYJ0eNbGjP9kdemY{F2mnf&d}#8+-p04mVm;3N1{nlLk9x$*9Z zP!zIXe1Lzmv58~!$qW81(h@4@{zuP(PhkGWN{UiX%qvV|lEW)NmOz_9jlFlcF+!q4 zL3AtJK8kz5hnA$!ECSx}_O6YARIy1zSFV{sc0{rw&W_1Zm1*v!u$~eU$M2BMvC1AENTS}s zka#~N0u|WQj%ovjbbv}7!1Dk0qX5PhKAUOs)_PoGT~pQmQ};ohq@Lx1%UUJh-K|~g z5}`lssvCdopd$gJ{z^QI8D{U?;O&Ua8 zp6*^Q+vI4|P6;r)n0|_C(T;2mH97EJ-ki|U5&?v!&~~dgq@&emII~&;I=k_EH;!t% 
zTd(eV^8#4=MUhD3PGq+mGy(+$1s_qsUp$~Kh_iF1Mb(vSI(Q)kYc}Q-0+)3{c94It zHhy8_NKh{zl77Q;{`u~6O4YEw3vf8p6G>VuDkkk! z^S+qrfBFz_tqJ_xm<@*c@rLfi7rzijj-xhAqfO_ABVDW(k7e&qOz;uF&eU{|I@M;_ zanWJ*j_28C6<0e2p<7Jak6s8h?v!^d@c0(Ua;V~PGn9{Cf5 z^jFG9+;;wXMItSV&XQ_*#?u0A`l}aSuy=75?wtD4CS2K>*m;nF$Gq0>O5eo^1fyrg zwwA5Dpw|8W^|hb<2xM87osD@W=glP!@%^pw@7M8jtoHYu5N~)Mqh{i+3}u?GoSR%u zS|39xnfhL7(z_j?Hy<{w6aF_W7d^mm=}RK+d&wZt>-!<==YLRN1VZLw8(mx1lT5zM zg-slE_y+>y5AgRdAN~P;1?9_=lCdB*Ivi#SA)6`U?N#kP!b}})JSt&zz|nAPoxsr_ zeeX)A5X~Mj{*+c4J0D^4Mb|2x6?`aJu{9Pn`jBf*#?@Fv&-m^?&l{<-fO<1=fK1x@YrM0Fs8Rp(IDs#&k9k$U^WYvPuIthO z#^A=!V*)JOUr~q@a8Hh1Oghim@hkE!^JC)5f-PQm=<#!!whNzmrnW9v3<CXI*Lkp~vic~mSM6%G#wi_mxe=Y?)f5EM31bx8+slRHCiF^lKlEte7^!2F=y ze*B9`TSQ0=%m8cYidMp}PyEIPyg2>(ps}r$@o%@g?-f~&icghX0H)=O$pEr@9SI~F z+3JBp_BR?6C91YI`c>o^Ie>pUG3?c!1N&4rv-@0l%-dkT^Jv zoE@NGjwxbBfhxF?0(_-EDqv)&@Xsqbz|+8e;_V#3hjwU-=#{(eCQ!mU{Qan41yHJ- zhH|}b66zMDrltbub8gujt7^7-7C9~{JF`XQ#tos3z~YK%#G|P#PQc$x$$jOUKh2!n z_v-xH?<}+hTL`(HUocG(2`2_HGYQz8l60qEBSM@Lf$~6_SNY?4$@WxS)2g!AM$}f- z!EV*V*QRX$sf+S^kH>W|Z+z$%=Dxoz|4O_3hP`^`?DYcUHrC;<(3#OrjF#5@sGi4(;fZ_vzYYh)dBii1l5^ONdH{Vu|YkD)G+so zF^x(8#XfbL7}W|j3-ufglS(f*D)= z^@(BX{7EK=T^E>h@s@2|N)UCbi=dbPkR~WNQ*FYqIZ9K;&BA@ZHCrR+1H)+?{z+9J z$JBl)BKcDyy;TYjS!CZWq~b1wOSFS?6!Vg<2)th9!(I)Yc+4BF#BSHY*SX?%UQd#r3exBGP>EU71MPZ=J&?pk^N&&C`|Ox%=TLvfK9$Bo2Hmfqz`K zHt9!caqM2}#}`62wssyIcZz-flb=T#23A+TfxNbMbvA`wf3Eh}vwI1IF*?qsirPV7 zNvR|L_yC;qYR$SZi2SXFM=D+X3#Y~b$==_Kq~(WlG+TN_lOA6PPGA(m4yjfrO&~_B z7Eb!e`1PH4q#wg+2=YuhhC~~bj(k)tZv*y`AK#QgZ?;C6aCI1!$cHcg!7IMc!}|Nz zOWN8=u#0?qFed1_Q)B`qT^!k2o8_gen*1bJck!;Vl>OdexDwBv^70iVjVyH`sffvl80 zJC|m(`hM0e(@xppkf4SZhJ>%-RhskL~&Q zgdDnyT#TSwId!QZZo?=pJP=<@KCnQ!{}dKACoSUScU4SPqiE^EE%pJ&$68F9@h?Z6 zbk#xD)3L;aLQ@w}!?t7O7`hUTgH#NPT_twPD_U(+PZ(&yd0iFqCwUD17h7i?7IoLQ>p=vRl#&JkmF{k&MN&$-Te=%jq(K^_ zLFtfg2Bf>YQ@T6%8lUHVzkTd)|0^EMkC|C(-Rruq`#L)z_9S$GntN9JI%^C?iDIYz zC=puj@y%?!KAc+dNZ(B@m-`-0~X}F zHh<@h{HDaluAVsLL&XsQEB=$0Sc~p`kZW&Ntpt}YHt*4vl*JdUk`y0^Av8;V?o}&U zIA0}#nmi>MV|@t$4?_*-do2^qa?w}<9{m+$42=2qX>sc8&Oz0yJa#0Bn9j);u*PR% z$a^(gLLt6`ms_-GVzA`yYjK#ob3+LEyEetVZO9u@rF83m2^$z4qf%2PKQIZEEnd!OsLOKF#l3svl z!C5cB!@eR5I}g%nm?Cq$o_%6VpW@xQ168Ft)|L~49%*--MK8yiXPVSgiIphstyh}0 zoi-T}&`oiqSL`h&@aqIMdn-A;YArInQXc(na9a4r;3QyY-DY|qqB?Mzj;hz;696TF zIQH|}7LctMLi0`#3ueo`**dTivON+!^-}yX@ECZPvxUsx0<_q=CKQik5uftr%_at} zdqqXAM$S)s0S8KT(*IQypO(WJuLT!pN{`yFv>3Sb-gg+vWQaUSBx)-yH22Oc8b#h^TTiUhVz18lhvX=`q&)DyT`N z+MX`AF12Lcyg?wH_jhGS2rSm>;lihkkQJ7&%Kx5kg57U0cDrqIuR-A!OgS%C8m_|F zr9UUi4JP1^wtx6WuJ?)x1z6%9w-lDju);xLLy-4RV!iXfcY)a``m|k1ngxvWKpM+7 z(9)vcN$+wpCS|ehi%K|K8Lu^%wNnHDk1|Hv?w|3&5=C|Gg-8l%)9Cm^_tfwScA>_9 z#er{9kWDT-hs#1aVzS*Ux$4v;#F*}+>p(=3RBWz|M%AkM{(!q&rs)gfuVpq7U#$I9 zma-FQQ515b0~i2i0vlu$y`K=Zg8_j-z)|$hcYiF%u2{S3f!Ku0bs|Q>sE(GGwsl$m z(gt4%Ue>~%T(RYIKx=Ynbp70IaC4uv9M|4~A@uOd!b$5{d8#(t3jz{J_K8X|KRA^E z`rKAm5|yWH%@CgvvrXuNhXm+AuO zIBkz3O|e+XfpAE=*<58l_tD8kS&{PXc_&lmI+0VCQ@_Yl$V^qDanFiCV?;qz#|I&Q@Zy^Hh#1#ZQ9K=frALxR9t}bE#h2UQxIfUs}`W z6)k@SlhWU~YC6Sy14sDm4=VO|Mwj!x$J>V;D{=KAe}1k^ggoT*Xuv^DoqaqiZVcjv z3p~C#y@KE2<%t`uzQ98WS;Q_ELOzY(P31!-ewVX&^d{QFAKD!RZ-QWPXH;YSUBQub zW_!i!m(Ia8V7wWr!#0EFFJ>GI^A!oOorSn}KvWki=i!d3fvS|nWn5XYVj^X&4wT-( zO;EH$tY&|diVu!p%IwJV#%;|V$Z;?kBC&2w)DA>}NeD7A3^`e_dx~2qb)5aLEB*Dj z@1tnj2F0ZkC`SZ-RSkA+Dy~92Sm@D5KADAqQ^P%X8{ z8AP02vT#ft$DSCTtT`8^h{Q*;&AemxL`Pt{JZBz6R#GQz&Xh<2z8&f$9^);ZV9zzT z`wE|8jwph?0Nsx&xc2a>^X>GFmD9x0vIds>gNJ+hK6Rc~=1@*BmO2GhwHd=bu+n}F zDt@{P67Uui0tUwGk+e`(ec{qk>uRu-v`!+%fXTA0ZTO&UyOs(zu)2 zIsJxNO!^YYb5d%Ld!|sJ_R)H;^ho zXI=A#>3SLeXFK>~r85Xu*ECl^wjU12w12knSpj^QOJNSUX(X3vn|yZVyS`^>73(Vz 
z&E2*Vfwh)`A?kQ(1*O;zyS?S@d7~L22SN|2*h=yH*d^$0@{WW}s(n5OY)87TP>cuL zk*GZXM~K8$VE(wF{1|&U*I3Q&F05L7K*>KP>FnsFDhLe2cAXswquH>y>~X#I3;6g9 z_nT;9cZl=WM3~wiw((5AQNSq)teXGYOC}#WV!yMesE z9#j+v=arxo0gK9<#k(cIZj0X_#abZ8FNvw$aaZ){SEb$(^eybak3S$(&#My3+l< z9mE4>SWci-Yf~Tdl2S+JRO#;lcY^oPlZqO;pbTR$c1}ALWsN@7DYJ zRM_aJ>rxzF#De24Ahu1%8Ae+1>+JKm6@?>t7mT^fA@~ATTael{Cl--QaqUzDH&aAO zFrZGMCRIqAHh2G<4(M7e!N%gfuWC;c!$t>^re-&CG33RmzUreL2D9gjuL={#P3PyPvO$!a$KW*nR3YtxI=+@lXa z~0rJRl0p=w1Nee8>A+kS(x)Ms+1S_i~R*e;hAt|+H03?*Q83)#&XbiarWq*?cYreJ{}jBs_(j}eO`MWqf| ze2|-Se3!fEdq1c&I3CT5De<{}Z{TLleyE~*azM{ zl=Y{UTR*PBOw~Pz+j}DbniyDTmC0t7l<_WM_;~hOk=kSKwVCW{Tsu12bIuna!I#h_ zPolU)iUA&ZpJ}_oO+w4#L?zNo#nq{=LXYS}z>E(im=fKsnw1s6TF279!3DYYZ=j0p zk!0v^(K35~O^mP?%FqZsoJUiGcp8v22pH|{K~<_*R#$;`%MnnyV7rsAZ?kx>D?y*) zJ)#dx2d7(QT#-A7ojJbx+XX9#}9M?bESmeT(!>yxX%FE_ZhApR+ zI2atOz}SA4=6w$=9yR{kUoJ?Xr=g0Q_I~p znqEZl@-gUe%Y9Ph)2`8+he2{dg&yM8?S`Z!@G z*Iq&)XFhv_h%G)9p7#`8q|OJjMWz3p=p#Y_a^Z7QPaL=x|fP)qXc$%u8aIe z(}(~WT;e7giCQJuanii%9}I*j*rPN+oTXlKpSN4rW;Dm_`g_+- z+cl71NS!7Hk$U`06`K}KJI=_AJz8gMIT0MFU!4LaIy!0l71%oZPXY%8i>VKZF0}O@7 zfIK?!@Esy_$^GB}T{LR>$*`uWPm9ict)5hmpr+Oj=VqTUS!dVD;byvH-pGX;LtQen z65@(f*CQF7EBuQNy6E)n}I^0f7bmu`p;?S>LpS>5plJr0PfD1NGX=JbN81^jl+e zGQcPDVRC~-<0mry`wAZ6zg&`jGjUbh`T^H#rPX+MF+r+Lkr`sg6pwwM4;!qewK9FM z`L>9ADCp&@ub6Ad0ktv_$g7SE=-{hoVrMTe=rAd4UM$R~pWpmY>4wkv4c_%dNk6~_ zx!UbXjZNBT7U|F6#(Wk;aPZ>{!27I#cg*cwe%_Xr-Bo-a6KHg;1XbU2q?|DtOjMaW zQCR4DXyi4~_=Q#|3fyo}DLZmAd)cHxtU;-!AKHWNmA4HZP&bet%ivx@IWbxRObh%S z1%NF8YZv?w;u)S`-b41=o;3LWe9XW3=-t>wRe{hekh?VoxDxY$oIWYnqD`AWDE`?K zK(qRb(RFn6-#uX->#j9e|$1!2=17B z0Sgmn(MKMG1%5QJik_Jb4I{g1xHy{6#oBJ8FVU8h6L8>x3X7xE1X?9!Ufr3xP;ZtU zbP`_nX+~ zeMN3fo-k8+6RJUAKVjJAUNK+%OZd}u?t$AXsUqc8*#$HzOOT)!j8V_HO|iN;q;-*7yR3n;VF%}4XL2yAO!*pPtc!_R%Oai%8&Y_@F&1yk*8(50fO|CyI#lN^w?7d0 zx>#`lEzCEG7u6v+;4P3lB72!{g<$NyZ5PCiXt9%dq88qu$G)!o)2I68^pbr#Jc;!1 zV|LumE6j;ppS9uAyL-u7A>mK^sArO~;d?IKT8)#QCFmy5e!enMS&}8rmtkFb3PICw zjO=ac!@V&)77n}hLt>g)2MmaUhN%MQnplzDD@V=Ck_vO-VZtYKoO;x^X2Na`8c9<1 z@6;G67BheJ@PNCK_5C@13;)B^L*aC}6dSTi2f(SkS}0TLw7b3L>SDEsdCrfYer(cQ z>Ud3}mi-vjM`j(iFTiP>=D)hYEIt10f7~yWpN!qWp5C0B`$a$Wt4J)%=V-$qL+AZl zo?47v&OXZCB{UIStD8pQI2Pca$n>%bt;oxK%XD328`>A*s|Ykc_s_^bg?JFGPFG+? z1JI%$+oZyHPD>gMZ)hUUVHrA7(4X<2Dj~e|U*9Nz%2vT=F}1X_$r!#wUDF@%GQD^K zxI+p7c$48`UJUg!gfn^B3!h5A(|%SI`G%At$wArcJI0*8lpt*S+caP+O#j=koWpKA z3!9&n9tjvDirUqxp0%z%lZttJ%HK*W2S_Wzx&MTZ<3EKzx)Ac|7Ni1iQ69MI6clBo zfZWZ5u{>7%S7Pl2@JZ3e!XTtTucCRA;c}OmkQ|kD#ylLGfMsrkauw`$K>3-rs-AU! 
z3Fy}FJs_OO>Fm>AmgY6H++;M3(rde==1;bqcInA=uC#ji8IQ8Q;^)L}%~soR6A2;X zBGkbx)o-_Aoq;MDO8D7FPuiVslgg* zzt%-Am&}dH7=)UOo9Ou`W5z z9@nym)pb<8J~aafXV$b(=WL=zY!WW2f9sEr75{vtE z`DVQz#7j2YU2aVdDR}**F4zD-9P|4*TXG2AB|QL zI0Ro*`Cp5}X4@dpSTSp1T zH0HtttmN04)5SM|37!55Gotuw~l9?E3AQ^xVIY0IVB6GK5-%t~6 zrDoe&3b?Ztq0dyh?$NViRIGeWfncSua!XcY)VA)ap&a^bY+_Tw8Igw|2e0 zkL)0#_Y`q9$xLLK`LL9iJ)*2~=T0BGk*D4;bUeEeX~X_T0f{V8{F7V3nrX5TKd+|k znW0nY;24v597_mTbJd}xg4r8c$4jNi7~QNe_xDD>ui@4yU$I0Zh>_(_bP2{D{bF!I zrMWKX{jiF8B|)juK?6dT5oPyZU6A=%`frGLv6|T0)7zz}eyUx`V|DdV7C$IW0sjbE zniVAiE>4@qKZWE|h8ZT0?H^o>3BC;f_y;l8=nCtzgWOk`^qZy_a_;5G!*I2aw$%lQ z5C7Ooz1h;0bFm34(J93h+=h%DH+4ivQIN8(^}T?9FX&psxw5i}Elc9K*ihq}@sO!9 z6SlzO%G)wjDFDIAe#a`8Xf)xw5=R>J|1Q>}5IEzO?*wH&zL6BU{ANNgc(X+Lqk{?F zxB42WX|`5G(axL$T1$(-=kJ-OJ(<lFsS_Bxxs9VhX`$(E#DCZQ6mXU5_nb8& z#Oh&e_(qRvz=pRsGUJxDN*46e&;t0MI(ymg>`zVm#r=_S9e3hsRcMog(6TV$H=@Ms zPB#^`-F`V)ZgvRjfRcci$NpvZ$9m`C4BNi8snU#rz(LA!=nrU z+jIHjOena>Dga~)Na|M@u=@ZKHRkOZsIwlO+*vqWdhR){9KFCSwYzX(sWoznGiQ~( zJ#sOt$WknK={AO~M(X5nUq2^^@tgt<&Hb0c2awhWGFIpy6+42gWrH1D-ThA>Ae<|H zX9K`;5~|NF)&INxZtqRI&;$TEo|Oy(c!q*wfPQKtc(-!!<71Vp;m6%mt!_XUCaqcu zgD*tFN<>4BMeXnRI!T#~c4M2ugp9GXCzz6a6*gQ58Pq}{Wz3it?-3LRUpUxl8H94t zQ$Wfp?q+EHBMyg^mIQv+^dUo5pHA&7r(V1J z8e`#4W}h$jB6Jy3;r&M%;C=!)f-wnxTvAW{r$toiRjvk1H=fn@Ny|v%IS3TP`aGbb zVqa5jU+C)rX=5FddIvo~k6p_?{)>lgjtRcCx|oytcg1-h4-QeAcW+2mhhUFBZthZ& zhTrCDLQ(HP1AnsK8YaH!vxNOXTEpj5omj6$IIxZiaihCfnHBOkxoZ#r4hbkd=aq|# zB;TJCyGJ|M#*KvK^sokgQmWbp*~)7;6~iGgkIvgId7HtxXIG)=JmwA>v;t)A9}+1| z8RzX!AAr*&t=_qr<@_Iq5P;MkOC=&15oK^Gx-q*ck*OoH?`y@vc*F%OD%Jdp=ErylAu)HEQGJF;TF>WF>iytv)M42atNl#^Xvb2RPCkpM2p3yq*PG!)c zpp&9{b?4%9eoQFd1%XxhY%U7=?Y^vT$7=~?%m*KWvd9G`FzfqS z`#`c8q|c`x3xJV+7WnL&H(loLItiJ*%WPFMK{i~}yg%i0mMsmBySJ5XTFJPtlc=@m z*hTz;$Jo;o^oKh{j~VpFbc?L%vXt zW?V$aQscryjehj)xU_;djBbU6VBlfNyU17pj}=6H_lf?%+(dQvM1)w}MBI zY@(ZUvrWPX^5Q&Ju!{r!vRD;C1<~9$@MFN>EauuIw0ETprTqBGVsArw70qBJv#~Cj z1kTrBGjozr2T9c7%m81HzV$xe*~3^kZ}<>5vGs{^Nt2*PjN7(bBven4N1z{&i`wqc z>_G@KB{-cgi+p_PH{ z11{vS-fNa5gC~vU)eDsl5Z=8|c(u~pSvJ4upcA;#2|2$Hzj2I>2j(iagafi-IV7Zx>D?oa2kyHT;E9AYtt=VwDjM9d!)25 zAH~Fu^*$5nDY#D3S1NIHxED#wcuz$QMDpwxaQIOCy%bX|+G##{)L9wHHG#!Ikz z$~|A_gkri9fD13ZGg0KUUS`xy0sJXs19jMWcA<@-PCFCbG+!!iqPT2yj%IB#s-s5i ze;C6{*P4v3do~4%??3~)R~_WWf&$(=#^{0dV0#0$5;&1cf&P5FoNt#R=9EvavutiL zWMyYmiwp%{qe&n2iIk%im-frJ@C;!Yym4W9h2yUPhznHP-3E74T01Y<8!kGtlZfuOC+qxhjQ(S)cx zfH|!@+kFC&*w`a_z3;iCI^6-fzkqgsZ$NMe=(zWZn@O@q)HjZ3=Imp|f5aexl0dSG z^R(qQ;G|YOSC8aK@_s3dAj9)dMC%Jv6$mp*wvBKypO2ifxpweB8;*CcN`2m<-BLCj z0><8<@N*meioFXz;9(zf!fUNJFjKG~VS9-M=~2yPbO1^PR+#QKEcKY_hGo-1|0VHS zF_gweCJp{igc8^T79RLN{+cfJQ8IGV;7X4HriO{gK;VjSW|s{E$FE9~f3|ObBwanV z*0r~`CN4htPq`%1E&!idMB=&;4YoGa6g$TjwNo-K^mR`@4U$%+rQhlL#_~CLvN#IA zdf(~l>+3J41tmc9`#e5Oxoo7BN#^*rD6u8jaFZ;6efrPB|;Ar2e6Po>?4ho2VZaLo5Tvo8kzpWD7t6jj%18 z2;dZn|5<@44)YA~!MHFzTVU{4FgBPOsn1Xx8A#=$lq}EcS@PCyZ%`Qpl{}!@i~suz zJl!^xSTR618bn<tC*cH~Nn@sV*j z!Fz5;cu@q>70cd^q_0R>fw{|0xVqj1yo%WV0H72tFA8@7KHhNd14Us zq31sf#;7@!!@ZK0Muwo(i%a@9J1Qk)uZ@!*1Tj)hh5*GX_BCo--l(Zzw$alvIPpf~ zoRyojdz{v6oA}AdWj2?~&ICZXuBGuEQ9^Yey$Zt!Chg{i8tu_j)69VlVlk+&;`ahoSxyu242v9f0Z_cTN zeMnyug*GFQGF@oBbO9k_?IZn?PCJIRDsauHOpWRX28%C^!c!`i7|KG`3y~oR$N?LI zon|J=q&4QkifJGY|51l`HAWqZMkc)%Xy#74nsR=wF_P! 
zT}SbrIItuTzk?mG)ylV57jJpSRsUo#1*O_lMrY~gDt?epxu2~n9m+EoX}ro?xs>m0 z>q@-)@quSM1#TsN@c|EfLYB%%pqB@F&!1XE5hScLdOiS_(_?1o`^%G~L!B-7$t&PQ zp_(lJt43SxZN>o3Zf-Bl0x{mWat`BsPOtiSTA%N~D-`H77C6l?5FjjDRmS`1X0e;N z2Vy0J{~yP%t>@jj02lC)gEGTRvcz&ehf#f_U5A6xIG>$H5(r!q7b`RRl_l%`a0pKC z-N5(@n9=>^P+SirqkPFst;|0Js&<(|=8}&*9ULXgWPtOi_0^YzQb%G?^n?6fKi)ik zFlhuLU%<{W$D9r=pFtWiOM%It(NHH&&dJWRp1ktDg}ep%Vwv$ji2} z0aw3vqN@b1A-I6zFcQ13N-Ac{B-(Z~N`(Px_L4(yG*yH(88a4gHoi1dBwzxusfnS9 zkpy(e#lj!DQEj&=A>usF#%dWu9^*aQZaXCke7JL##<5BF!PfByJq7%|mz}ueEf3-K zWPxs3Bnf%Ztm!afnAuoa0r7C#2S9sdw%XFM1F?AacP3T~L+c7Uhh zJgko;L|HaG!qNLt0zVwJOoDtmG-^P|Myr;F#e-ss@5qj=>x*2jUkrV7tU=GEslmw| z?QVUJ+@Sq!HF&5(-R+>UyA-oE7@`33Sf^FR{w(hRXfG%!n}>>9;~Za4vY?zZrB3;u zkc^_Vhmy8)IB&n=d*mC(a&U&DUS<+!)g~%!B}aD+6MZhuMHDOLdJ`;}n=93{ z4~`CP0QEC685_tvpe0Bk=;pLmW3W~%Ncm^$Somh+&pQ%;0{UR00+B)_mA*fkd^>$# z0=?0a+Y*jt10lvsMdmvcR(o?E{OX9n@B{*%p=+}jt~Y@iov1+X1wr25Mp5#r2y0i1 zZw*fS!dP~eZ|#n439~5mXb~Ti0%aVmyb%WjI>kKwtprP==0MzS7m~?%#RKOE8bd@V zfdX?d?gWxQDOA31jzb|nK%L@ZoE*C+c2bL6@@{0~-@{RFX%0Qo%Z9J(-qA%vAwF7g zcum%8ORd1@_vLNPg>5wBiSb8OFIppK_aiWu9co=I6W>0O)2XoK+YH8%u}dV!dYatd)G1B~uF z4>$qkgU|hC&ujNoeO#*R(bcp4kkeTpj;Se@&tunlPA(TwX3dg!^poWGY68~bh7!%LHnz>q@_-~N-7OAV6&89^^G4H(FdfZMpB5SztrSf>oN`HxjL zHpbCPV!TOE6E>%~7^=+PXLGku<2+uL`EYl>roRD^3d@r4$fON(uJ1Nwav1&k!n|@Ss|&26&1PJI zY2J657X(ChH*5jVPS5j6GutfP`W;oiY@oU zg-aj9&cJjX(%F{%;GYB#NZCs*vwJhuCwN6E;&B|J{BRH@qy=Zm&3b3?;sO{;JP4Mk zxY2YG>uNalaT?2*8LZDQ zSm6)6XOeT4{QzEWMDTdQH>>Ams};O-|GB(`#J84vMdQMi(+}sQvv2n?41o5MsTkjL zoSf(P+_ip&n<6a$`_({xc?AeS%Yp+mQJ9ik(ahklF?t>iWMLH8?}owxZK= zzKU;$sqz*6*hL<+46)ZrM?k{rm0{(=|8;0)>(TzFARTa`+N&y%%=G+wXyyw!5e>hd zd-o(vH9!#=uP_laaYNCP{%dJ+ek<(cJnmm+>iVCvGr3~6m^8@wX2y5q$6IV|ucbku z2UOFn~nfBL%cN;$~CpN^A^DWYX`HhZFTIHig((#KipP zN%Rl*`v;E3xU_&9-ol)JG57GRkO`zfwgJOvmwj_F1hN`5^M|EeaA=(xZmE(>v=e_6 zE-blDH6EV2WZMv48VC9mqCxr9nLVRIPx!4P1Kpx$9ZX#Fs%H$~-4p!C;>VQ}yZNcw z`g+h>GOj#W9d2n$^`L93=NYBR{)=AfhRZ%&=FD70k;B*Q;Oa0Xw5^raa7j7c0j92> zjq~+|bmIZj2l4g2i~7b+D|FM|`7xv&=APipAmh=K3rJ{CJ$(zzGYS-rL1lvyaXh#X zYM|*wZE{tKtPlN(X7E6tx2G?s1v%9=8Fa(&HuLQJ8nN`IxaB~VYEl|K)}MKmANm^w zKKMDlyz-dWnR(gYjwJ_CWo+-!>ypxlN6)SJaR{RB&pj`8?74k*1(Q2)h@YSu2M5G- zc)D|?LtbspHdft?gV%J?zx_JIDcl-fn$?;B_je^R-z5>M5-Gh@G~QiW`=Dy2s#Bas zZiom&+_GFH@*xE$SwmIhYWMapF$AJM8E#d=iCgaH&M(uH$xtMJe;s3+ym@h!h!@h$ z8W_oT9c3J~Tm@%*)p2F-b10$HeSHxP*3H@naBS+^T`ztchQnaYW!3oHWBFP4Wp4sF z2Ckd9H#x4ojs~IRcM?Qt-hiNo0|) zu0KDUyL|C+70*5$&?$&jlV11#GcmY?fAi1O)t#9zTmtQJ*++FFkIK2L=Az7tQ_jeT zi{znTa8oT4#Q0ajspPdv4XSYK2!}Hw(91sP{ZElOYgLUcfVJ~-L<2C;?!O3r-|jSD zJ%K0d-NIFAn5&1YZGuTSEt}=E(p;Xt^zeOil3?8Ot%+-&_ADn6K1d*h zGR@fIJGJjg6&dbIBnx<`#xRj#WmlYTj{^ptm$(LNI{Q|o3@{P6dzej9Erv*1bo%U& zE;&?>2I=5(WM9_!$k|VyxrldXeeb3-=zCN@`8}t|gGi|AUM>^EI}I6wWr5frQ9TldrK6iRK7&a&^kDMa_~w2Hhadk;q<$_i_y|RC45X7glU11| z*4yM#D<2E0laV$#63zP2nftXMAi^9bHa z0GYO#mg@=4_1!GWH0j(_<_9Z`8BmxbDb>vIM+=I3U(rs`5x}=GuC-uukVg0JHre^} zj-8FtUhO|U!$kpkre%F-aLrOcrTHqWfy-0;Pl&vpfQno_@k2E&L_yE>zKF~}`#JZ+ zqT6uQB8m|YUji;9wP0AL3Tbp%(L|m>he#pSQUx^G+Q2*o_>zNGh_EhvFAG8hOODy3 z0I$fOS+=aK^dS!?Kl zNo~q)#0KR__u)JafBQk?j-ewuZHX{}-h~VHPVF?AJGdg+`{8~52tuKhD?G5&0AHd(f*`<5lBXpFVo#t!Gazeryl7MYOSAcD|mrz}VSK4?Jf zq#a1XNJPtKAQg=(1*U>&7cIP6p+H^|+Hc0pvO_^>(0F6cJ$2Z%i%7xXW(AUkohcuF zQ&MfRha>CG7|TT4I48s#MVLK4Rt)etWyt?7O7<)vG*YNY!{Pgj5U-T*Osnc;`!Oy0 zK^L1hwbnoX5bgA2{V%sc35cgvoEWMWUQD;1m0s!3V+wJ*2?%1$O6pyOFjG9PEGGEX z1nk)bUJPtA8l%>Ok~92AcMC=Ywl`iGm|M(eFJqCwa-Y>h;`>vV;>tD<>6KZ9XLxG(;M=kITYiLU%S*S8fSClp2mhvON8AXRNnhd$&6#!|DD6>27M*v-4;0vsA`jD zs6pI@hvWkb;ueb=^&8Zg4eI1gcn$Yb!>6y|TL`vd=CYCYz=s@*v~jcZYgVw;zzGBB z-Fmm_j>Pu%mH!K(r9uR)j+I} 
zIA8QmwTD6!%7$aQs1@;7zSCy4@0*5Kn=?BKN}Za>_Qz*>Wtz`kM;k{0Xl3e7f~r(| zoBvUcd%>)meof4fY>SB`1fqf2jO;w=(hK!G%O}{{y%`i_CJ!%hBm-TD^kk}A< zhj)J|+cAlB=aS*3|8%-7#!ws3^0mW_RS_V;>qK~6=M?UtAK%55h7iE%EzFesJbJw6 zRwbcS7W$kDG3Jjh4IQCl{>z7-$H6EME^kO9Y74TXu8hFoS2pf%R)u;y4}d}l%sUY8 zaUx^pD4Pq1Kq@GFV}E(v8$uiZMjEj-xA|WE?rfNh?xVA=Tji}y36P$9w-LR}1vGIB z@%8Hjzkr_4z$NOWVmjVt2LhMfOpk364_|~bkhIsXzSW1vQ!&A=?s%}=j=H&B8 z`x11}CWpzSV8U;s4-uG{Uz|W3dJBHBjl_a3=7%c%)HzNNK6ae$no@5}@ONH=wHrVm zS*_l7?V`QGF~P|XB}pcJhp(@UQxF?L5C39_5}o)ixGxEunWbJwV)V$1P!jHIO&P|- zdn2TOyO(f9HMEzot`8}#<9+U8Fe(_rL!c$9GyOe5s6w88RGZq02{!{ z?bXS0VRfn5Fa;P#V}Vy{GLw|-27?KgHwQyWPv0wQ?J!(X#KJY})$BL>+zq-8){qY9M zMBF&*&iqjBs|4qk1h$E3mlcz%Y*`9rJ^T-qs2GQ(O~5%1YP940R56U{3I!)Y<(Edv z^?>{gn4pI*l&lJtL$n=vvLMXB6@T?0rbnWp3zFq~ja~WF$NwIJ0j`gH32|3aV#zw! za4fd5#A>sL4=jNqGZV?UZ7IW{L1h%%+UfweNmqCkbsHdxebzjGjgXg}67NNqjN%(V?!7ggaz19HuT`m&9=e3`QD5?p1*` zV9JeKyZkQt*g@{`fDgtT$k?49whpZgwZZ)l>(pAMzU z6``R`-fVGjSlR$%9}s4MdFVH$!9e{{m_v`uC)c{GETcrRTuY)@w$IKQ2_5tKk;X6h z3w-sz(ng~K(H`TWDPr528Kx9`^bn-OECnq2v9(=sIn zIpo5Su|Q&-QUN+JKgyNg4&w>^9nK)=y$In=c^E}I_<3%5YBH@hJ&#}ue z{AoPX$psWPSrh?BayYwJp>*wY>l&~0O@=)Vi1Ye(rpV(D0=Zb7&rK9}{Hs<#j$_Cz zLP(FG&p0mwwwwyMM)KMO7%iRvct!|}UJ_|;S8TK3A!bn!t1(E?3txY+W+J>utrQn} z%srLDtN4kxPlpc@?oX(#d4m=De9XME#8^U;2(*IKUn|y2HqHvX&j!kb5y3YppWmBh z`C&n>hPz00XCs+#KYQpL#3mzm)=wt^c!RQcif6K@il|d1wz@#lVBU3*?a%|_1wB<7 zUpuR7Zha+oWoi*J(Ae1}%Ay*DxvW+xIa)#7C^rf)t`+VmSPz zGG9(#XrF(fE_+QYE#57+og-fG^H|8|z~P{FG0{WlVnKbtGo|+8#4UBQ)ny=&&B0_N zmFFb&BKb3ocpm?Y*!wTT#wvI3kBn{vtIk*frqOJ~et4t3dX2|sCZ8^`Gvt1)YGEiv z*>#*!(S{O@TG#W)zblE0;w80o40Yrf`w&O~(A4?mdbDC@@e4Q;;S>u$7)`0QpYP4K zw*_Ha_3F9XVq2q9iw2;}cGWvu8R@*v`DAYKD;#%Bwmp)+LkB)nY2%XxlV z3%7i}+DO4vdnH4yLPiw*jf$WKu`tCo)Z}>4ClW81tZ?pQZ$Y!@?Eo3&?+Mc#7pfnf z@mr5^H;FrQYB3>>_zH&##XVAI?Kb?%i5O$OEUjl`^<8n>t@ zxQ&G-P>9m0oY zjyJ+Qja~K_)S}!ve2(+E&kC`PQ{VL_%Wv2{I*+KEnDn~IDx{N9n-t`JNludM5YAXI z{&8I#VjFzcvUm{lgqpI6goM>iyztgUei%c@mHNF~VV1~aa>!aX+)wM(3Hyhm-<20b zF4!YSdI?nWYl-U%brCZozxs+GdOtgt(92Q-mz5S@o?N~HsSivz!6~+7h3~G2@}t~V zjtqqf_$;L=Iz;%VvI!G&Zqk$UfKj+ee&ekqE8NmZlv?k4LNL`d%D1F~iEnPUvY{R> z+8=p}$%L44l=Qt0p+ARJj7W3z7C(bJV=oB%x@lCW7=uvIgDDZISB11BOD(}fn4mH< zfV7ODaYaqx<NMY3YevSzkf$`tHg=PV((1j%hK zFGgo{GMiC0a*XSTZZISA+zL}IzJyp)C{;;bC}}K;`i_QF2JKX;7jF$ok(y5X-spO? 
z?^Tg+d4F~&=c(!}`%Fr^I@$F`LaC&O@ zaGJo~To>DDcMe%vh0qF4OmzOuObinR&siVw^l|E zthB_lKf@h3$w{gG7)h(c2$I20U|ey^lVMG7)_N>EPUzYxvVLY?RG(MP2s8tw12*RE z&F-93f>{avKdC->_?%~}U(Wo7(uBiQRoc$yHP=`k^amO34DX_h1aoRQ(%T}Q$u~YU z+gD#lD_Nn9I#3*D44pYK$w`q37^cfe>h-K$I*QZYJ-vHsNfM1&^3FI44nbqv4NoGw z;yn)Z*UQ97um@!?)SXI=wQ$4t==$8f0>p1~I;2n{F2?VaEE6TVY2O~2ZuUd65y|?$ zuX`%O!@|f&U0;}@^D~^coe^8KG(3*EHYV*CI-1 z@p)M7w!@KeX#sRS{0Z$eu|8+|lJTGKXptqI_cIp}t~H~M=2CAFvYQolR(S&t+6F(= zyon3&^;ozzexid9h1x5zs|1?rcnwxDruWDM1XEfGyWPhsmg+0n)a`jse|p;c(OT?TfvY5eIj>ZEE^c>D;ahQDcy+d;2^{lAq+vHi1tV6NWlj#Y};*F9iBhASW zKt0$p2itVIdG6yCuU^e%J|>X}oE=Se1rObq(GQsV1h z)uPsaP^Q4OTfE=n(L+q6A{9c!@l^Q||2_Eh^(PMOt`BWmId7ex4x+o|CaE;B*dPD5tGau3ksVp^hLuhnZ#b{dB`wL|qobYl2hcP3E14X>c5M<~EbzL>7&*+4Q2K(2FM+$` zzG7nkZDr@LrCl?blSlDGUvHr}avM|0Z~BLE)i%ppiLFwNYaA zixcpx=6H}#A9x;n@N8gjwlblAc$q4Ao7DD1Bthq$c*FT)M}g?oL?TKbJ!Ov2fgA}z z8s#-Jo;|Y4a3Y20Rwo*$m299;MGM8;ct7`tPwUoaR5>$*sj|v_>a5y?e@cl> z&IayM!~5Hgmh&&LCMu%)L(d7 z_Wm_{#YoWu%D#q}z$%hM8gz{hGBGPSKqw(^hE2m!$5g9(8MRV&s1-?a?yGmnwUjEm z3yqqmd$U}7RV#*9$Lm#AfUm8ALxYt#i=OOx++CbgU}q@&M$5x$;2nYO?NMh)nom_d zg_8JcN0`+P9+M9L_b6=1WBmH330^CbL$x4Bu?>JppZ7=3W17HAM`XNPud{xHL1E#g zg>nQcJU_oYTV-LbshT^}Qu9uxm7$E}9ro!{3gyHt1-l=>RlulHbnv)fhgCh%l_jqU z5$PGmuheTl!lj4LR7FxBxDp}Xkk{1K%ivxG%;7+M$3Mb-vg|F^#r76G@Vu1$?XJ(2 zzVycSh)Oey1LqNCW_sAoYDqF?ThsNv&srBV_2^D-TxKU3<)jO8_)`@*-uF)-#un%} zn$QT(w>A2`@nw#b-tJ=cL|{D|KS-gwerPftTKGL`(oD{R^7E@7kPm%#C2&R-FYA1xL|_ zTj?GSgR_Xp?Ehi!t)rse;`QO71t}$^5hX=hLP{hQh7LibK}EWoK@3v5rKFK=>23t+ zZjh4hnBm=n=bU?fx6WPb{@%6j`>y*BYYjTX?C;+D^Lak?Ji1FUDGQ&JwD^9;^JVrn}VYEMYG`T{{3%I zPZKj8vQYv`r5Y5J%b^mxv$Plnnqzjp1x#jJbNbz`G|*L-pr3+bvJf}3ur4devB!BE zXZ=&vR}-=bVHt%-l3;(PXfI~m^sfmm)~GHa;+@uFjcMwW%?sLZa`1nh(i3&;q4Q1B zx*)dcoj1zvk%Pc73K%*eRC4w8yNE?MDOl&~W2!K{8P%R zD>d((^S)X)3-tAVpegV0?!D~4LOM8yfD8Q~3#%8`EZ zUJ!#8sx_$Ge)8&xdyRpIGy=Y3mwMKLGY=G)MNb+;M=bZt+3`v@&g-BuP?3v!M)Go* zVp@l!7`ljImlKxLruWaW;4nZI#}YN1b$ivSHPdN}PpBL((AZGE*;C15#FoS=AP^k3>nl?_2g@-OhIv#ZBe~faBpavs@{Nv+=bV3iQb*q z`;|>gu-Ij6Nd87FmBXi%U)L4M3hJgm_H2_z$gC;uHYmLurYDqX^75`Dmp)K!Pnqk- zFs{!Tdn)MRa);tY;+2m$3v=$B(omJI=nLL z4W)w7dNga2Gakp&hTAh8awJS|v1Eh=TCMc@i%q&;E3C|qc-<6RCTQ4whF_}2<+L?5 z_SkFTbR{V`<7W#9JQa+VyCPGRzEJh!zp!q^SZJrU!^Iq@0gy`H@^M9IuNNvEF>d0X z-^t-|huY1NhJXWStg=XBlMIR0P$W3~@_ZH>B#a<~#QK(PCysYv2-LlR=%SK(y3tg0 zFMGOr)+9V;!KEmO{&HvZN(;?V-dn88#OCPG=9@m#Kz@CR1y^8q6RU@rJSdhs2@uR#(?Xnh3>(O)CV-P6W1c!IhZH6i_B-rEPI%Z(x3E8*bebhx2v>UUBo8Jr|VPFUY?)s@C^Ch zUn=ES*fon&(nlwVF z?Czd^RBSQg7`Y1`Brpna3k=jg!x%@b!Bz<(Z-_qC%j4tI_UTu2)Zc6j&t7ITdysLvJBp@Igp*S{e?CB zsPf%}b^al&uGB=;BT(M}r_dwE+99`hAfKa_$Q=A<9QIMiBHwAT~!CG;6?`+&R zS>1MD?YN-gbTFnD&yzwf%~_nW=E^9e)Bmiw^dUJ4D#zCFHBTY&Q!u`UaZnaq?;At$ zO!`s^z#yCk`;KG&N~xpj!XVVtB>l5C&uzy$qa^@YJTH1+SlS$6qUOpCQZuGQrXO?p z4S%NKnQoilaEw+Bx^4_Hgjx=(a>m`Cq1Ja9c3YpEc!nWLp`1FTQFSP)9d{p&{nnEM zNE}`xbhardsb3rN+UshFEf!5a;Nx#YBu!!xEA2MQwRV_VjpL47t8vra>pWjTMDYW; z$*1XJ*=whqRTPnlfrXSxu6Xez4-1@Mk&0=HbJqYZ(2YFRV9DB`EI}B2t3jG5wtSmmz>_56hz==^2{SZfqc|*ov^@7!JK{aXv;|3Pnpd%Q9r91Uee7h@38 z>Nkih^agKE&wTTaURd>XDy7-$Ca+NN-7Ouw#A-R3cE*G^zLPYSaXl*-Agu?tD0o{4KO?g}GXps63$?ytAchOfVQpgK|!cuxTJ;883hF%RyTi1?hjtR|=QuSxr z6j#BzQoS8C=bhQ<=BeIzly*^EQ}R=-!KMRfq5{md4rW}+T`ER?3hl`^}@M+50&;pX|40xyPv*BANB#a1UK2{$$UjAMQS9c&c?S_Sr3RO zR3hJ$-HFltK2i5v`DH6x*Vr~+ENC6kEEYLZ#+Alge6_&1|ytB2~D;?MM^4LCH-cD zJ@c;gTj!81N7raT;j2r5pS5?t`WJV9zRp7~$6uOUMs2uUo)^xi{88;IALxw$6>+~x zLM!Ny`mBWCkab@d7)6XCo5i;Gb`VZ#r34{|7Qn^V0_bX5(w>G(wg*vn-gwmjX#7&{ zGPVJ4oV|c#_gjg#Q)qJJl(_DIS2Z8>aLd>7E)O6iKI^$H?i34>2c0w{a6-x02*@1S zRr2jREye|f=MKQY!b%OBYGcez{;YCS-U$I<*PO)&Zv{qQ}d2FZ)%O 
z#kB3K&G)=gllkwAX*l&S!p3a*&%$ZZWM^;eFC&gA;cqP}NtTq)$LiWS|2RI*@C2h$ z6_VXS%MWX@!s?UCmAg-NCzC==lg>X~YTj2x20@-rfKj~R6v#SDh*`MUPPH_z{+W!{FKsU~9*7w8WmIY8*2f34Q6_pBAz?|G6r za-CDUA#MIH;IfZaq7jnUZ3;laKpDixW!l{kLLn1|vaT&Wp;WJtLp>A9WAjMO-6+zdPh7PF*k6GauwA&w400k8;qF+f{W+G5j$Z(V^=mG}6 z2?c||v`u#)=s+!Nf+!rRt6_;_fYit72QK>nR3@utH!);k#R@3YVsHE+`|1$7KeY~* z?@!te^>^~5(W3biaf&c!+>r=OEE|a;z1p0V46nLO$3d_&!3b3o8n0}uq2C+hPc+2L zZX1_){9tSV47y~xHFj@PF-W?fPG2sS)w!$`U=ND>fPER>zNiJN8A44;8s(10YPck^ zuf-c()cavgtX$?3uWV&$N^5EJXKtSb>V^(^k-bFgD)IO!W5zC-?&A|cIz3CmLuyHw zLojQ`$=%gbxgnYNz%e}XHgvQK)Q~fu*=9U40~rH$cGZK-iZ&1Cy`SHiHuheOzO}uz zm*e^>4wNKdSyg0;YJ?3UH4bGXCvWUd{L$-Uj?5V(*_qt^pZ?GA2f3 zm3f-D2`Rgj0;B_00i!t36}t{t0#-K_fe|lc4_+C}LRsFBcQfx+mSO4(c|@tiX@aqk z1S9Z!W5ZC1{zdFAPjc3N;@7^sMCyj?we4+D)%{7{yvczHaH*ars)vbZ*lSmAc5cZ@ z_e-Oasc^#+@!)!1ZFhwHlF>`3+`F~xg?;1>k8fZ2(QJ1K85-k4i87*cIwS$DK)WKD z+vONXrc96?Ke%>adT*K(R;-Q?6Sbz!Qnt>0zxG_wL_D_A-iMdQi$46>6FZ5ws2Izx z%d9huFAK&jx|6Wa27fKjc4x0E1s9J_Q)vfXXkv1I_wrslP@4`@E*-WpCPFXz-ZJc}Kqp5Z=FLIWq)w zrzx_LthW^tg_k^;Ii-3it!n6xwr6Q|2DN{Um&h!2li6gx-CORKQJwOkMZ;N`YkX%2 zXhc#Z{W>nRIvYy)L|hLD0gK`*dwHkO$#wxCpVjJ8h$3|}Qq*TNx$lC&S+C_<5h@>MMFkz!s_ z1mImr4NlJrGG=&Z9?J4$p4enmYLG%A{X5=dUJsin0efa{pXP?n9HN1DKa%WkDG41? z-O}EYhT(t_sNOxDy#V8gPtj6DKRpvde2||^2Jq4D=Kunz-(g5=jKN)K-_p?b#o5{8 zRsu1ZnOSJuaXP!a6eER_wWzxO`WJdtQIfuzv&7e<#~xf(<85QSe6Y=jG5W=z+P7Rw zauht_ggB{Q<$3d==2gvupA1DSqxC5)686rE9+OoDRU@jml&opRY1qI7Imz*eDWlAH z-#)OjswJ7#okGOIRUzbD@bEF)rDyGQgSgDKK0QIhn`0@hO9zjsU2)0!9B6h-I&VmR+!cDYv$dxzGFS|yjnt#(fh>C-UH~7&B|rW?zRl{mHX`- zejFRHg4qWG!k2mzp?qvUY9infw?;s)4VZv;WRP87bTVAZm&oBo_#`+3YEQ=oO~y2JOw3SZNJ^05J#?=Kh>1a{S8 zJr_DNJ8!&IqsRy5%G%nR9H<}lU*6Pug@h@VG$RQ&C1WiyAWZ+ae4MSB_;}$m({Ima zObDKH;dHrM!M**JT39z(q*@)_0h120U+fmi)op3=+FKpuXg$j5ZB`6*fn34UZ>I@H zI5g0DLuxL=J-4G965=t5or|!Q7)u$C$5Ut4pAF>ESV0TG!uw#+S^ACo?FO-hHvu=0 zD8s?(6kdR4`!)N-erI5=m74&SK{tE(_kAE9B(oxF&dk5bp15&0NNkt5k|yJEbE)u% zyTNEu1pU0XTs~n*)MjZqz1V;_fMoH>(sb`0wY!=FXEm>>f6CQPBa^7=Wk%y74qUQP zw~YvRXh2}AoI|`aF^T}`NUs0wP_fCYY}RZ{@Y{8%u$E-^EZA*KH+lU zYhQZ92R-t9fJo1TO2KO#iz{mfGOHTexT* zX8P@q1f;Yogq(X0WWVyQHsh0@3kx$b7W%eW=d;8O)lThd*n^HF>8^@A(74n^d(|4{ zqXny~ayO}@Vwh>k)KWey*X1!7_^knI0z4<2Bh(@4)_Wj^X)w+5C}K?#VtxJUaspoq zn*{%XrxCGMH_{CB;w-YCUCEjfJ!OQP&_hwh2|dWaif|z%LDw<}qmYuI;HF5?f?YYE z)&=RRjqCDIn1Osr@o_MmyRXINh($~Z36+Gv04HgFaMT^i$x*Tpm)zf2>4m*1OJyGS z=uAXNhV!pj8g7Dy>w;xdpAc!?UV9_o2$qQnn6ICUI?OY2Cn4qi)^w9SS2)#%K=%8) z<|7+sp=aN}Ic=uCeSdYlFC91fibXA(ClZr;3fC2f5FPU*1+s*G0$2|1xL33Q}lu#Xgv%V-!; zyxz#w0s$-Vnm*ZV}PYo}o3Od|(e|wL^hnK7S)O%+$#T(BiZP`d@DH!+Z zX5x2>*x3Qv8JRtmdmPm<>%^*-J9%PcehW(Z6*uCR27JvY=`zBB^5OaZd*0SU)1*#e zU)HD_IZfu`Yf~`gQfY}HMeaz7o={f(CgtvC`#fztBwmT?7N4|yXR(Gp;{(gi?9 zD{UGLxap4;lf7-Vxy1A!`A=tx9VUtnzbM(zx|_d((2?xdWvxTp-87areC8xMm*1R5 zukJz9sj7pk#n{w)`yXzr)kVUb^I6LRK>TWcZjKX}mcPOVHJJ~x`v6>K+=SF1#l!yl zP$UeDXS-6@?9^2qCBHwo!86ifkDh@j%=m&tnhxYjRrt@T*dO@(}&6I-#;OiMnMgGt&hdC=pGtlL@1A_d`CF$pI0LF zW!@*G5gZ_c#`FIrROV9}De(H89R;=4CI(a|;tEuAeX0yxvZF(Ek)Eh-eUz>+XTv`t_ zy~4ZMrtCBGJk<4BO9y)ep8aIeT7RznoN+_3Be)7>BZ~v)9zhPme$e8$8{RJBwBGa` zdyO1VCRU=ay6`4!oke&-`=Z6_V>pjD#`ZYZ6p`E1 zQT%URFccUgaQ^=Xl`ka1Cb`TdC$?n4Lu$`<)5yxjzh{a ze*Eq3`r4yl^V+?*J$4@`r>F^5dVXnY_tkwj6UuR>^42x`@T!;K?OB4+I(5x!nO}f1 ztv|We%|fruLx3roxbZaVX_w-6c z!kh(uywo1W1O0*r)YaV2dFUCxpQ}U^@JrrrA9@&LDnK%V^nLi!zx?Bh9|Tm z+0HwtbrQ?#MF;pIA16luL5%)dXc5jzTacu!S_`O4#fYEeC+j-#r)q#?ki z%<}4j;j9Ykf9y!A7ym^eaClJCD%i^5=dp`VlXdLe!49CKx~{M?+YRM4UExqS{SwO{TmB8+lXjRZfH#Yp)(pz4+%*0 zG7LhtZmsB8$JRVs>I~;EkB`d-P;-7biN>?WDA!YFxs#z+bUWp}9S9Bb-XAvQiy6o^ zAxpW#H$XE1ttu$m7nzXhXb2e?$NVSv=f9NrQbYB6_D_kg4{B$Fd=8j?Uj{`<>a}%G 
[GIT binary patch literal data: base85-encoded blob for a binary file in this patch; not human-readable, omitted here]
zZ^~1Nu>aRm`P#c>bE44NQbKw7AneSJ*}HYJQ^dk#^h8`IkbQeS$AVzA6b&WpfKGqo z$K?YX^L{Tnz5Br^V=LU>193r;!}b$y%>K9L&GIf_*?V&&G#NSJkCx8WjPIi6E`@|7 zE8mTsls4-3jn1MfQu6W#B~@X%d;t}iXZ!vlMlo0Y4RZ*p3@HzAhton0jz5BCCUy{S zB%PeR6+tNOk&w#8xHCU6O5$4S1JL%1eiP{QS!M zMWj$U*zKA2q~pF1nctl$Bg(5tvC)k8aW31J0gL`Z|1GD^u3@uFvh6f$1&U@|*Q$25 zd75~uOftOr*Pin4ib>TLa$?v2=B#!;MqQfbUk2LX+E$ELU332J;Eo`GUoEWdg8()cNPL4p4>#ggV=~ronV#4&Te}AOu(ey6 zQ)9khx;N7hX5jDrhq#*}srn*psJ-_&9x&C|v_yOZ1wbNkAS5B7VJs7>KyjE=t3HH( z4~L4S9W~8ZoLsIGyI5aoHv5eHB~hB!Ng7+?=DgK)`?`6!#(UF+lw3mgwI6M^+%#_# zN#r!C=3UWEur8b=Al|XWHn(E0p~5NFs_np>wjKb{*KN#mJee;0O@TQG)rrmd>YtFd zo{f7@?%+^CtR^n@Jls9Mnltr=}%Dx8vm>N|MwSKapNYK?-51&)Wc2HkWUPajdEb z)NGh(lb^r34%_|^W+HiP;bY%xxNL~4uGUVV(aH6mSF6nC=TMV&;J39r+WcqLWswu! z_CcD`VbS^QWjAzo18`6{Gdec>B`j01m>71<1r=W%53{{5Z|WZ-8P6ut6CjMjM1!!^ z%~6X3{nQSMsN{fuOUihAX0U0#cQvvrwa)`X4e}PE$+eKzAwUDMQnt#N0*6C~>e&9i z^kDVbC6GX;Bd)>e#X%r9gvAK@(9vVYf}s>uqi|Y2?@voSpY%MmByzLY<^gv8hh9A& zyN;Wnw)Sy0w^*EAGVw|F-XjHvVUygqz8Ij2$IY_Pu|Z{x)f05#eor_KLop-9P%{Oc z!_5S#N#F==EW)u?AK9EE+CfCW@Wz`%o9)MU)C;^VBFH4Ay>G5!loHMIfhid^&YQB7 z8LDi{-uQ(x$nyPeIovuBh=m7x0s?~iGmtFPl{9Z#`ug!nk$|D6BSM0|u%EZ&=E}Y{ItaR;_-S(=)R>Hq!InCfuYKS#-a2znYS&ozTXcZX z{q|#MCBi-nUoL%2kdvOPedtxHafcTy`43f}7icR}_NZ%Bo@VqZ@LgzmfE2P<*MLju zG-oAR7%+{@X1{l&HXf>pVMXbsJ-y$69lvhd#2^-jpMToXF$LVwjyGW^*`EQN$4Un^ zFd8l)PiAxD+`Mr0WPNg94^R7udO@hU36J3Txl`KzS()D1BaOGBeIA?OZ;2m` z{CisOA}kkq??C<3j&qQtMg~_PaaUZBP9SSMFe;sEU!kd5y}z&dajmm3_$;|^DH2hc zh()fwS8m$f&K+(eGV~1(*VG=veRmGEJV>4^ZV@8H0|`X+O{;WI;lnsd;~^;3SHs#g#EEA5;&Ecr0R0NIy=B$ zZWR6u-8k^gip-`oe=+K#-!w2_!L{wmatMt+a4MBlws52dcZ}uME&BlK5ei5RDM&VmtDtrf*bh6P@NWfJ( z1mmu1gsIj)BVra&{u7rRy%S8RUi2^xPz2~KdK?Q zMVZR;Yca0$dl>?xD$snMMC0wB5aJYaa_Ot9-S1cYm|)3p+XI<8_0Xev;4pp_k-z$T zb;bR}fe~%VbF!x3haNy36p$zdRXIqNL! zZRMy{n`aEpH-ae6-|sM<8hhV9e|_dG%S=F0gxDPD&v(M3xZ3P2)r)9IbpRp)H$*Cn zI?0(uYUb1Xji~SU4S7LTlo=W{fdySz_72!OnIYmuRN5lBxkx=|t$p<=0g+GTGu)Z| zzp9xWeuOj>CvphTlT-<#$-T=BLZ4aGuKZZAhyQO})(&UJ;2?XL`AQS%WUg*%+3N!% znr)Bo>9Wg~0^4W9$S&uT>*iWa7=i{mMLV!+@_zaL=W_lZ41I49p>e^yiRPW&BV}@| zMiWB2@t>@yA`^xqpEvLXSpRNv@0cN~!bZXe)E47? zHMgFMHLfDiV&;HEdnfk&`OmHuY^J}k1}-*CxeRO(9PPZ89C!JwQD7!ydsH%?Sdl zBu|^BD(t8*+%;s-9YlUC*tOIat?yr#i|y;(dqP~|50)I1oBi8R`S#p)Kf{JG1%i1v z)Og&Q&VSZ7)!$XOEi@Yk|@+&e^xz>|Ofk$0C-(|va>a9%G6`uRGL#v589`aUSU*}KsvIZj>geY zffP%HVXBz?07YT|IeVnhrp#t9bf8M|556BV3V13Yk@Rm754h1G+?h&IOsXMNcz;<# zd*7Tl*5zf5&u=V-n8>CedR}P)uhp|?y2R+?ee`hx zN1wULqP5*#XoRG0;3{yE?V81QY&z7GA`pRx$B$_~snC-&fV2JLYq>H;Vm(tHQKKzq zMuP;$0Dc4Sro|ewp;&F)F2C9VO>$wC72&mdcq2d{rKbV3T33DeVex#(10e<7Zjw)p zpG0!*J}%7@H0W4}6n}hEMCBQd6Cy2W>^^AxCXHAUeBZ3h9gOz`OF!#yUQt{CnX3c3 z!W4}3StUmUIV78-`h!*eg^nQK$P!`+*@h|g%rV#bKwVl{!TQco85fFQ?PEsY?;de-4jF`K|85`%hEB4IZELM3*@eKF>k)Jwk;Qp-@cti3mW(n0h{}q5)rt>| zYJ4mOw4|zz{uQgrby2+;wl;$1wz3naRZ@b(0 z3V+tT%XHilGuIgJs+^nyK|~`m(@_;5yiVPf1e}3WALPBNC{t35rJ_B11RI1peErWu z*VEhV7jvlOUIdwv#*Ub?4`=63j#56@)ZAU(;agArh`}IPGIFO_U~nFf8w}NmV&re} zrjgBylx<|QfZyAI(d36UqoRCo(y8{z_I*BQ)h$2+6Tx#YNz#@tHY5;nW(*Gtv3Lgz z4X1Eh9Jh$LY>zR<*q=&tV@!LM;rC?Vpaq+G+i-+)LV&qoT5b~_ip07u#!ZzfF7VL# z>HIHMahd^%bkZ%NpY@FXiE!~G1D>8fyt)d(ClE4r=4;lQ(16^+S5LDU!==)Yuvz(O zwb>eSL|c(c!Drk6fo43NW**(<|V-*#hndAX7tUR*UkZiJ|g6I$t;yZ6mp0uy)6 z4>$cVRU6fe-`nq^5Mj_2(I_&arw;%4NcO2wNrwMaa)#?)ozC6P#+WdNoTweVxVA%U z-;t~C*GqzR=Cln8HT{w6!+v_hi^~rcddcx||Lw6nQg=P>Xsl&79eS)@s3mY$xg}W= zZag!x5H^?OY{nRq6pd@!%(UpZ_CfNNF(vz+_61J9@Lzb)j1lw--j^o*3!z3=XJZyY z^jYh$wP>i?yMLUo27l($((RR}*`(vH zAM%1WoN*Zbada%yPjpSwJe<%&NKf;UQmS=uadA0UkO<< zXS~}c)X_1KTHh}0htj~#QxRr7VX7h{?gswwJ;YJ@BFR)(QatVpkYXx5EhMZH=GvX! 
zq1}@*@N6&!j7Q#PXvevYdy7=J5Glk23fS`BF3H{%$xF zoIZl;h+h_DS;zn23Yf2~ppuzk#Za-BF~umT7d&WT{>TyuP{7JwV*g;2t?@1VieB$w zKao4&IRW#6tf7S=UU8G#@7WT2>#zOvS?KHbLr&;!u8#PcYST?NL_qSU&2lu_zZyrC za53sv42!Vow1?D)Rp-+GjPX+yRc%%oB2>179Z_@!l4njw0XTRaX{v$(t1#`TUWJYd5f@$JU3Cbp!dd&k5o?yUh`z085v8YYd$^dnB`5tWw32Cd zl(Wj??6?}($WR~*-2W-&!$iM1-~5@hae5*^_#)LM)K1p}oW~?df<}J!csK*Bna&z$ zg7sTS%lzgCQIg!xBr65cpRM(z*^-_$wD59b5!TV2YvNB*?j<^Jy>F|MIh;17qKGD& zK3gxSH&lq*FQ|uVmM|2O2_)>6>kW295^h6+k!CkD{{_wv2?vI(M^fy}TntT?W76)M zrG#-p@;+ZG7$9TA`>`R<@2HBz0I7(<X==Dqy;qbj zRzX4Bbr)xnCfE8fBY1`qmCx}b{pkYh9+OuK(RrCTW$6!>8S}frz|N!2gh;+T(&zfau@K&#D?VunW!fMz z>zZ6=pu<~6&Lvs!VimVANrvB7vEjnlqK`Zjq(%-k-HZ&P62}<88g?U z4pbV>r-7g#-#9j|RgBx8$`9|3%po|Qwe1x@^fEtQFP?zDnXH^fp07&2Bo-Z8Y%d&d zZN2f9^L@!cf#pb+n?uPxGK5mHw}*!*S6^DPwZDgwshwYx!Em%>gdZ0FYr3$hT1}n@ zr#k`0RkF{tZdPqkdo^=MWNxn|gJm%m2Y$m(pTJI1l@BSO?x zSaxC+xzIb*o=6{mU=O1V_i>l5KVD?9+E6a3fvf90n5!(1pF&yiGmT15JP9{qts|e2 zn=8YE(EVaR7C}WR+yy(+qw5!ij~j`ybL#z6Wt7^REU?(WWOcde#eAxe>;2Twlp1yy(Lv~4n!nn`@`ICof`}L{zFCl20e^|rNRWTHGLK3Iaw&6jVG2`KFXjM*UGqtk zu~`4fNU5PC1WyuWk=J^Cx7=F3J-OX4Q*2IwMEGn#i1grNTT7>c>#S0_)VoK}V~EXt z`C~EbxaQ3(DM}lq`t|kt^*)LNXGF@8b=IT(>{YSqqw`l0*}Mv5LiP-tdIBk&i-OjT z@*|}EZ{oh5P4%-Zz*X0C`NzKbT*m!91>?8b+{d(Xq+5VMmUlK3itg{u^m{Jx!NJlz zI(S|5eaV-w5Mer@Aj;x!p7;D>SEaRGw?~?HEa-1UznDgTspQM0>Q_gde4CJu4UgzU zX}vq|{jbDIkRs_Jcn}uItF=4h0kG-2U3kHc)5+pVV7L>1eu&ySGQJu7^Yfw2+sME` z&S1=XzoIk{F?e)BW+sPvf6+@`19IXb zAnNOx%y-W3RpwQFeJ+8JB_yM4)_+u>SFtzopnv{+Kz>CW=!T3Svs9E~nE6|D(g9Ud z7HZYo3b+@66%Y-^rzf-EvIkyx460+H)xJFmna#ZLFf@iQKa__)pz7#)UYuN9O}Z^dw5Rx+hL`hNi5DjL3BOumASg35@jFFWH~TxE&v!h1z*X!J1%%3Do`f+ z`h!h~c4F1N8-vY=9Pm!p^4rcT9xpfVW{~^;?wzgmh#P<&=x=pA#q4>1fS6xW%xjg* zotDlm^QM#Ifb$nley!iw(6mxg8Xrwz! zoNY08hjx>u0hci9s8c_wL*|P!EkxG$OZ5Cf22PLO!|n&jwR2qS^ZAIV)HSRfj`hGn z(owk{4}K47Lmc&;4FLUbb1tLWpD$CkIi6Rk@uu2ae1$1OutTu(zX&fjzxFH3hvIum zlB^;V;1>Ex){f53{de(&@!lz(Fd%jjYf;bl{XK*mKfe7?s5#jGjSK|Z*VH${)Sg>G9_+NXpm_g^eb?8>wnC|a>cc!}A%W$&#BMAS4b|ugDW0Ag zJKjO&&v2iuEBNBYTK^r;j!ZV6n2;~gqj_t(eYXdFDxy@C$n{?nut70nu)a60P_e+D zCLZyLkY8yJ;06X2CANln47E)yS`1guxNJRZD~0g_{)GG_?iB%<6^54jzE7IGZ#W%I zW#r%^NmDQV2ZChF53ChYpi*U@O0;m;@%R!d90$OO>YXpguQGZPS(p_KP^;IpH^k&o z;TE7Gj6=`=P@{Vh=S^TF!ePyK!bSoN?Z2c5MQ3Oi1HoO|`U1>QMWn`P`Npd46<9s! z!NNe%q~X`;h`$iaqVyu5{=VuP!0tG@`g3q`zHwou#rG&UD=)`}?k%hQdcnTDqjR6X zjFm~SUTRdP&-uVUlP$;&d-)rhb`s6lDOq0Y-MIm^fo&zwml@~QaTbte{{9nR#a)+q zfa;vONP5#ja)Z7+A5xY3N_P0Z*WCDF@}6%IaATBKdMH-@s8o|N(EIp)V{TT#y{t1X zsx`r%Jm8_lIx(3ag{X+mZZYe>6B%WG&SdJ;eBa)PDWG`DBCj?bYl+ah)KIy(?|T}Y zeWt3x9{G^t`^zdzM#TBIP`kx3gpA*YfC%9XJ-6)tNID0HJik7SXDlvj^<;CIPixs+ zHdf26WqV;y#4AzKob90lY)}a|Ut{IS2%z3df?vZb7%F&e^1t)A&u;1JGP$1;JVxXcgr) zZ!QgSlQ5GzU!H`r`@ovwOGq|vro(PeDOOplt54YeSoeA`nb2tB58E(1BOt)Td1#Tq zK#@@G5tf3av3BO3js$?L;nll4y82Q!i;HnUPJ9YPUj;V z>BG_W7iQ~?f&Oq5Yt_<(Lc=jkRMjn5qwvN8`O)h_#fm|q*9321j1D2yEI-6!EYqv3 z+{{&~%XAC~mG6#DP6~G9k*zv^323MST0@1i31vm*B`(h&54!-X^jGrh-dmsdFLb^j z+SF6u{=^jXCk%pgVtNx+o1tpS)e5Mc_E$DtI$IJ3v-#`k?ZrlbP+l_MQiy}~Dqy@X z8em}D6y+5Q6u_UoUOD$KQ4oI<;vyu6gomNd8v}Wrl;wRh;c}GwByjJZtncL{KA;dD zm)7uMXDPbu;Sq#?JriQa#rZA0T&P}yP^@W9H{m!PNu#^#GtLY0^7EMgmjLkUyCQLd zuaUO&0DX1d9V^b{qphE2p9B(%5Lubn_OibzWxl;;YNLHrxmYJCpefP&bu+VRCHC98xv-Z8>z7jQwE-+mZ3HMk+v7|H#oan)s>6VsKM&s^kl@$Ma1@ zDGj-B3p*z#=fze(TH+D5_lmCva8?thw#q6rXvH_}hM?&?&gilNjh6%_y$`B6HqHKG z2-=>XLwnj9P5Nt6bx9J?(6{AoePwVDSC{NVS77)YfIQ-e%F<1%_{+k(yaH(rAx487 zbFN+@nsWwql@ds8xA6xLRV3DBW3AsgSkdm#j+x&My2wCFIftd4ySqh9L)a$od&EaqQGI(=)xkTyB-Rw5gERgxK`zE>)LuJDRDs+^lban=!Uy1s^7YLfs? 
zA*M+&AqFgO7d15=R1V72Gf`@hpb+k5;nC1nhvUwpJ=f)LnIqr5AkO8mp*D51tLuc- zST%wAHty3|aN2cwm6-CXh{k@D9Wsv=4jfmHPv&FPAjQAZv_WEAB8X@}kI6v^V}RZ0 zkBNk{!t0&m>+RC#MHk~HJRmG~I9o>Fa5R&I$hz5_5!em?hKCJE5}9R;kw-0X`m8VzJO69&AXE4=qdHZSKdJ8kq8R zxRCea$OMtRUU$yhI-lxb?s6(sW-s~qKdc3(GdHyxW7=Pgy5P|-a_0LQ=bpM@2yi7M zP6Cc7?W$YOf4i}AW&R$5(w+q1#?VPniLe7 zjKXDhPa->PLmpNp7lu8q24tg{>T>XE3@K37jt%83Pz9VCtR(zM*sHn;fDib+08K~Q z(vW)M1?~NSAWOF-sf@#+T(m#K6RGP?{}w3?wCl%jvDOx;Rv*44Y1!$GG#coc_aE-c zaoMiaiyQiQmTFqwVa{dfWPe9#1BLUw>H|au>zguNT=mNVrE&S;o2?j<>r$C8@o|f( zmK>5F3xKpQyg!9{$&_cSlPzf3X6&{794#W-39k6fQ)>X4&0K)MA?F)Z+YKeG;Si|G zQ~A8qiHS(U*H}L7xxzi;ETRtO9e)On8`&$-C%p*OGT7KVd*uMbA|!FS0}0%L zfFpEdGwi2&8~YS7Tbt~tbpAr}TM&F6df|!X5G$m~BHhY_eRvUW$DziqCrt(*KV-@k zk8uJeE(l<(=cyXCb$QJ~K0Hxc*J4;AzI(Nsyms3>KQ<-(DT5XZR{@M^E(@!Ct~HtC zf${@V6pqz~IXJV{lZZnq+R7mSP>re>n&i zW6n2=q8(a|oXh^r7`xMyb)AooG|AqBoclGv%gSv-$TX*$H+clT@c3f#M5ZXkg+`mcVgtwQU3*{scbve62q#vF%bdan@G;UL!yxc zl)CiR={tLj3}{qEK;$$lSK|hmWn0Las5bk26h=dbtfS@&cL7}9O8^qbHZ2bQ-vUgw zzQ6ljRbS_D6hnqKp_qK3Lu8yqQIX}BQb`*Sf!FPBB99h)is}|r`b~&xeZ1QFewH7C zsSKQ|iy{*$wLiGPEQ%0^uh7HDNW@!wo2pik%3c60K`XXn-Wt}rMIzq<#=6wfa=Dm40@_Lc#T*}Q+KPCG-FrJ#k0#~;n>ezec3 zRB*FLeHB4)jD*-&>Bv-p^Y{2i4r6gZ^xvj{P38uuB0PvrItWhGsmOb%jdiFdFPZw+ z`yaL#ycFWJJ3D?qB=SJ?mPkPcUPxe11VbzaJe7gEX#RL@XZ00__@OA6E!g++kBF4y z3M8QwRiuv{#doQ74+3V)&!gh0|kFhlX|2%5H4*_!;V!C5B%VQ7C(c- ztKyKx%Wj{4ZE{3;1U~r~@>T+q!;!+P&7e#ZFuD^YTyh!7pXDyzs z{oYg_5XyARz#Wi4NHyBQl=4iWE`Ek^+ZuAw&e3@`8=gfU2xrAXYbqit-{Zlzhh=qh z0)A3mf%MjR#UPwtaOC6cpr zV%IH&;wYQVw~uBx!y8b0m5jW<1-0Q&eY$ZL<_ehtqc3}5+MrN zE2uBH$MHWUNRavv7>TX|vQ-wx7VlfKz7zNiepSs@0hbsnd{j}$PIBbZEmIzkW#VB5 zs5yn0ww2B`KW zNQp{*;aC1}y^CuYM8>q(wm=Nk{&FuZCYBuazZ+xR2{Ez!c`g@1e-BCvH%|Y?=w0#) zK9o5nx_jqe-MOj-X}jDNrjG-#X{G^Ou$u6Om{*h`fg!Yeo@3)HqJ~%0?Qn5$_E3AtH`>XbTgXV4491yu^dJgCjF5wIBvyni%a7KBn`HCf6tPEbjL&vH7U4{D<# z7vK%}ozU*6AD%Cfb7S$}6PbsSrpvr3&IrcGDV6ElEK*8|?=~pWLHIKLj(&_V^QtK5 zzTf7od7zXa{-P52$XobX*0KA8QO-Rwp~`XyuYvB!owb?8eHyLv%lRr8)ceAYlQ~lB z8cAb$_>ow&p_4^qkI%ej6}u$hdC{aVa2_5Wh6bTo%QndP@4HpiG*=Ae9A9lOk0kDs z6|jLr#`5Zh)RvbEmT?Fyqw}3N7FmkR7=vQgLJ)O5mi>^hzxxm|4+7Lta-eVMDy!bE zdY#c@@;870WbnHJ=z&KPa}pDQn(W~!)J77t0hE7hnQ<$grr`#&y>mVG;zO|Xng88f zde*TaI%hZdbiLDXVh? 
z%J#kvfkIG+Cak0WV1IAqT#@L8b%LLjwRK@406;&g8Dq9)F?-zMU&{;*M)hL^-c?Cg z{AE@2nu+(IA7l^^H)1kji6E^iA+qT;v3P-#3ruY#Wi}o-wW?%+@zVpk0RFw)b{H1KuIy^9Z)KO(; zhusXv>Fe`dL{!tP1Zei_h)m3C%_{8Kzp@#D*MwoF&s9A7-EWoVw><49-l>w`>VZA+ zORNiU=%DREb|~IL&Mhwb?{)G{FAk=qQ-%CuY}cAq;Su4Q-eY0OgsoK+$U+@?a41hL z=jKr5KtvLjzy8+0HL2M8oHztJGn-h+)ju~d<>$x}}LR*0=CR%*Pbz~N=B_O{Dj zr_s4&rqV#K(RPuFqO*M-&GDe{zSi_qIMHT1>9j)@gu(HB408_q1MK@0RkR1h##mF; z4jbaKhk)D4X_}9gC(k0k7hrHp0i`*n!bn-Fd9}u>!W5 zMJG3&J=B^>`(ybNR~QV@TqLh0cg7NyG4%E=pXb9+SDC?)4@%hGx;G`8w!<~XLZom4 z?BWfZwqk4;)wqZR#xl*G-WIgPXCcNv-{Vui@<424Wxj8($UGi@TKbK%-r0nMf`p}4 z51FtSWrB5Tmc`W+%?{QkBVl?slAy{1oY7y8=nGv73e*{MyrEtU{9P)UMBThaTswg} zL~so6QpyA{sLr8qkH_ir;#}D_96JcsB>8i3YF}c;F(7*i*>C>zG{`%SnxdX#OT_|w z?1P3V0m_oLMi&IMDO`R&i`!KfU%VL>B^>o?@W=;C;2iZ;2uevDwb_n)C!Qkb?SUa$ z1?~7U@U96F9`E6kQC7V4sl(A0zj-KI8a*rjy=-VxOR?|1T^r83qRivjuine|=-T}B zYkj6&jTFY-vL(t}I?KNBRu58^da;zCUjeohrn+Ez*XvmUMc56?;Pj19g*yo7XOnG` zQih;heBoOzp9#F?XJI9!RS|}3lKI$sp~A!Sd}CmM$O5Eyr{?d#S7d)w)%hEt9r`lJ zZm3Fa)Mt>cw~6y)Xs{-DxJpssCsk{Blj|a?SqN+o2f%bYBa~NTi0EcVKrOsRQ@HL7yUs2e{2DGCT)Z5h zvwK!r0xSyULE0wp9{#R+p~Xwxwk}+|w);6q+yBa24WkNcvZ>KHKrVe)aR>wRawYpwT!ScQmObiL!ru6( zBE)&Z&F4e8qfhOb+!Dp*_acgavX5ozOyld{ydtje%1w7?89g1Uv5zxv^S*XX!k+TW zptS8U)GZeK7-)Wy9=Mt5SG;wym9M~&vWM?gL}7MYv2@%zXKTJVv{m!I*WZ@k;?9c+ zG4Cqd1OR9)ckgbuQpn?zsy=!ecemNctLW7GrMZfM=E~>mzhw(b+ibXv+c0w>Ab7Ah zVsX~--#Nw`V&CFa9y0Ags~mQw`PQSMj}hLF!8CiVZN4vRp+Cr(Xyp?jTJO^2+ofFd zWtT%?HG?YT15uH__lwqQO&+Wm{}kmA?$wSZ3fs^7)T|fk1JXM^ zL`uFPm zM)7in?B!otu+~|Zu$C7ntFGCq%Aa)yhZ71?Ir;)W)`=Jc&@m1q3pJQcYyEJO?)$Z* zK566!Cov9_?AH$mg@&VZV_y|}`A2aZ8D{+L*)@)XmHgnOov-BEp?7p23w?xK2IQz< zq8Ol>#h@R$^)ihaZ-`YjOBuvX8gpVe)?S@@In^e1ahva5QfB81X-SVn@L0xfMSfZX z6%(%05AEN-C(iQ*$>e?SD#w}aEvL?lV4IoqIvii3Si^Sair+VBHiLEshxN`_PBW>| zDGPZXls4#bA$QbgkRE20CMw2Yxu~czDsmeYKb9OyI8qbY05-NuFEL1 zIpBqF{<>Xxv6f4S=3~pV}8B6aADDs|I{jI5?g60SzY-V~kx2 z=N56wW{s*md7c?ewOd_e>;0Z`!0?>0N>n5s%Lw<1O48Hia!!$$-`!u>+v$@|ja}Hh z{H{$R&9~i;i{3Z4A;xV~p^79RV1A3ffeg9RKa9-SgWvP`a{8=* zHUymQl4sDWm=%BW#ab{Y5A9X_%oh(r;&Oe!^SD1Rd1hT!DvuwI92`lKA2okSQWdX( zv^gA4{3*HFeQyk@#W0k7SD3_I?%7mRkq<~H)qs*VErnW{|M|tVSp}S#*DueZPN38i zrSIvnatrc`p* z#Bx7srAJw^bMb4VKvFs={r<1ASMN(X42XI}lhKT|X*iJKV~CfRR?Yn!-1R;ejl_qz zxJ{^XXIwz3Gl1AszrJgjx7r0fR)TyI79U8GB9@)eUbP1EL*J(IC4Ol++AfqQpO;nE_mU`H^(wfr3cw)>6fSUj*5lvqT# zCFrWYt2x6ImKdDwsqxgu`EDA7)hAvgj>>g98Mt@6WC?$OL?~ z3l+2IZvU;ykDz&$Sv+AHEl}sUei^CVl}|hmVl3Bf&Fw3vDCEPi2?IvN2q-A_f7v=n z7p>QNCJz$35nOJikD4g&G!NYuUlu%oh~q?y8E)UL7A;J}K^tTg8)U4K8XJHD%m1PNmBv_F@KH}qa}ma8$WEG!Uq%zQk^ z;XaY*D1TmJv#(7)?<@YqqOZuZ1ugni;B+{Sayatxp*FOcvcu|!wHSr4jJPn7Ejn7f zVE=bn^8&friQCKOfoS!7^I!j+J*nsPvtqw$MwVYc*^*ALrnveRM=h_(dn!3Fs{g>O z2k_`n%bhioZ#LcLCEZ!f?R9`Q z4G(Vxx2qbfwryj-Coy7204KAwwG+IlA|9G;5z&WrP6O_}9i9Vx{L-X&fQi6Z@*5bu z-ve!V^S!;#t70}cVAK*lR+Ztm4n2+SX~=IVU`(}&_;N53&bY06Qtc!I{ry9z28VAb z;|{AGPZPLh1B^MH=U}X7S~6|hYc-7Wd^a9ze&*s6$)!aim5U0{YkJ7rfS>#QgScR* zw$^%uaNg5OQBje|uXp(DtQ1UNT=n`?AhtfOkd)x4sZyDFF`|k$j{^KxQVs{yMszy= z859R~dYx5+3DoXx141voEw%*xg&%ax8@sh)N{3 zDm#>>d*3%APPw_eE&6#$fTH_Q+0FpFwk~p2E~bslHf(I#Qg<}ZyroR;>1Dl$Ht?vb z$YPE#KR1KVNX;iwMIvY_zE``45l(E`jO%mK`6E0VbT<~Hn*apfuF?%G>&K)gf9ZF5*}3L#+N*muH3Z>)b_ zQ_V>H%27qhm|kVK(Mp{;B*)A}moQ{vOF>nRT}U?Y8GZl-S4O@0RmR%m&Sg$G7P4iY z!39sG8XkVqL5jxboj{IoQ`I5-DmdHe+I*VIl|-|iDUM~)tp)2geWCq%Q}G96c^Z8^ zm2L5^EH#o8LQu#Ovlw~^Cx$xnHgwEUaC*yF{tZgAz9ngpZ2Y&)T4RH9%-T!B$;7{? 
z+g~M<16C)wM70o6I*7OxbZmn@sSZXx_2M`B@E$ie$$e6`!_vY+L|!@NCURdk2i9|t ze0|}^%G{#wT1-P6l6_xQ1ualoj{}S=vsiFO;k=vuPnLat&&r`+SyWg-n!jI!q)oaiTb~c* zfJzB^ABs-@gb?%2ReZay93GbFBjKY33&!x7Y9e}*cb14$EIs*Y0?@l02AE>y`=f7U zQhjzJT~?fy=gQ<0`8WIwak0J;uSh-oI3?H4r{fJK0wJ9cEGl78j(2zn(U*McE&L^! zCS~FcPf2MrTb0>kj>FZB{YI&$=Xk$8T>4(bUC{*L=WRA_!o}q4c_8=%9$_Mm+PhgF zMxifCKjh@Nh=Q}f$d%NQSH$eII&^E6&Zr6uGFdAa|E z^m6tp*#G(F$lW99Oa-~^htc}zu|QqET7euFjdQiI#}ilo>Rh0nqZU~v&NJ!Nmu4>O2DY7MQ=^qEJ9&_gxpr@=zHc_*R0ci-5 zWAP8(R~RHi<7Wf@w6v#h2>Ac)IwgwQwV$-!vjn9f3vqOBc{Pb>h~S}bsx)1{SNWqIG-$ zoKEkWIMmcvjV436@d~dUkm;VQS&d$MuPb{rgf!;^(6z^M)ZXXTn?N&YOZ7Q+2~a~- zdpAh^{dx-s>1V8>1*%W(dNB8HAutm}lp?@R)twL(0r9OHPEDSzej)^>JhqAEXD4D= zJA+-EL{du=$?Bj?&m-vSiW8|5)iXLkAfKMs+e$9>?67<~Ed6Fo8VqX_Hl9p)l*{h} z_$!zt3a{oy2=90|m}PeRkXR4@PG0qEOuhRgh>ApSVsbN`<(}{>Vg621qf%9T-DTn? zVh$a4OX8MFMoRB3B?w2|+a0MIjG-HRkSwjFaH$lXKlyipN!&!t2?)Q6-&M3&_#GS_hNH2+BxZGr9C z7zVZa;DSoP)2m1>1mdc07^2&?GNx~w)!_~Cg@?**7e>=KkAWB6JnF{u#l%N6*{ELu zWS9J94k<1YT)!~bIKy)e2f{sk;KYgwyTEk);C7$|yXfXuQJa|323GPu6G7lur6lSl zgr8eT!0mAl+qMfbP#pkJtb(8=@=|hV%=eQYZIr6)jHCiaBHBI-{TjCN)T3`D|BgAt z?%G7*)Lz~fK+4A(TzbD#6T%Z`7Zy(RyE5VVbjAMc@;Nu(?V<^5x=0T-MB`6kblcIY z9*$HL1S_7tEOj^8pyOys63k&eco#V%95plKVCe2qKGwpa0i;aLo`i+nhpLQo!kgbj z3N-@TB`N4ozO&h`0xG=o4uk{zN5`7__TPQvLD;L96csrW$uk;Frg&Cc94jeN=ysd5 z5erU$-sQg8J7d69!_vhBFR^ z(wbOOIt~fAZ9f2r69W98_-B?SG1jwI>@b`kB$~kyZUUz%_dyjvrFdNum@I)nYOOH- zjPgCngNWV9Sfn#P+FpvIp8=Zwh{0GgS-bmZaN(9!W>11D*I0aPhhxkLCBFI8_gGIS zr%huSYjaiz+K}S@ z5eAa6VML_ACD0-l$^&-R(m)U$jS_eMhnjS)ABxBW~f0O$Ne~{!#bS7~D)goGu zaq%x~NE=B09OmAVfs)#xy7r=_bNV=*ppFP(mYt(MYba zilXcZS@DpuU?l1HSM2Bg27%-NkL-6Bs|k&fVyxI#+iYLt7Rw{EXw~_CKn|93&6kp5 zhf^fl$U$$?50T)_`GFkr@GjKK{I>hu#lLCN`|BrdB(^wbXb z+~7I1*uRLq=Fk%hPf-orMrT3)mRklc+aN8D1z)d{!gruv&*$@;^_hJr^7!WVEwxVw z8K>6UVXGw%x90K=-c`@a9++qQ9skC#(y(J>kGA-mTlJutR4eL&qdrTuGHZ)f`=cy! z0z;CDTOo`|!eyFyi9f-Vg$nYI4=ZwHwyA2Gg&0g#Vn#Z8|A~9BnqDgXVK=M2Z7r?? z|1P&Aw8{0OZOt#c$e!*vIJADlfMA~H==w(pY&>z4! zkgVR9Hr}W|fW>cCCisM|3Mo5uL*A_eYd`FG!-|c;iV=^EWC+y^4-Uj3h+)h6lby9O zEdZpPyU6x<)ukAL)9ua+D7R)c!gV)i^W95c^5s5DYO-IEO&b>8KO|IW$VsvN{kbJ< z-^I(uFRbcAffA=Avc~K+VH-bOgG>AE=4-RFh#|bmQb`?szUoJb2tRivi!TBCtm}%S z&Bb+IPvag(*@^fdzv+Y+Lb`(Oh$OA8oWL?>Yt~{!_YH6*hr4ZSySA2obWLUz2xb;J ztMzrF<;Jml22-+_?`*6#eB^oZrC1Pd@Y3+TZb52kKq-m^RvF8C_K&*s&E((NKubUw z_o)G|4>n(7owq<^*H{?jS&lpHz~i^!Aj5|6`JYPN88c^IDWcHT5u_Gz>UmwxBd zhYwRpZFc!P;w;@u4i!BXEg zJJ_X0*1sc?{7Jxz{U@G0(hjCev{p2q(122uk6=As;-fxM@fKgPL*>HG}vh?9_ zc%_9oU@4U20Do9n!ee4RB8>NYe$E;r*QJz{ld^$0RK=~)`ZWQOv4cCl?MLv@{$96) zy}11R{d3J<6JQ-dC92@;;OdiG2v4?@z_WYUikF+_erC(v4s`GX-m-RtRjdubMCD?) 
z|M-1d?0!#;m^1eZBB&&C{gZLo6oGHWLg2o6J%2(sVsbSZarS{#!s5Dr0T1eon_~OH za1j_n)<&E&*cua*>ns#@zdDBiXU11}fu22~ju1wrA%!aj>0oHZvRq#13Wh-^6Aaf+V^kEJoxiQW?!`}DVf1ap(Di_Io&>mlRj{CYo1ya| zRq71*Gm@#mM=>?AyO?4PW1&y>K@nJ(WB-s#0Lv+YL`ITk!O2;uoG4^_`IodEaqCG_ zU*rBvl8x}5@C2&Fgvo^Y&lS(J>c1D-_y4#;AFjWC;o905o)CP+H(CS1mqMKcrsuG) zUkug$*_ULL8`<1>n8nff4lRvH8*)eKm5}vVnM03(r_tK>cAQdS+1;hmH6588Hr8k1 z06NCrE}WD9!;|MOGmimP_O$)tus*4G<(pS5OJtU@$Qn{!c=dRLusOP(J^OEHchWab z`+g8{ow@%2dYe>@LCXi()aEZy9dv7-f44LiMhN|&el7-va1S5&oAwc97Q2gv-*pm0 zI>f#lAuKwXg}C}za(p&bX4JY6)4XY&#p&Mq9i-^Ix9I!II$e?fRe2j(SOYaBe^d&e z#KFboxf!-)#sM@H4vV52V=#Wm3ZkZbl3y~TIZsl4*#^H}Bu2n@{N=nvV%8C7!x~5| zrayD%7@&Y)L9hcZ4V4X(rrW;75!r+MauR-=@jh)(LZwP+7M`(e3|8l**Z=>B8gzX&iFp*NUIFXFo^*erj{D$KiBJqnu zVc_(TI=UrvM6CZg>4ut=@!a^vHQa_pq_7di{Ui4YE*$`eko6#^cr7;viP&uxaLx@^ zAY=J=4J6p`yjIMHWrDH7j)sBYlf~xHcV|0;&uIq)NI+GI4bm6X%4yQmmzZw1yuaTq zq4aux(TgI>bq&<>nk|O_@>Lp?^=+)(C!^APjA`ql)5hka_N37$r^B}6fF==PbD~~ zzS|&x6y!A&<}`|o7#`V%VCcdwCZ_ndbC)(oc&U)_FH52GyUyne7D(uG@2&UaE_B~^ zMY@mE-4!AgYJ|MA3hP%1D!DgXA*Z`Pwlk&Yi=0Ug$N~zmY#kz*NhW(swY&QJqBtY>@e3| z3Ni^|C*t30U94&sEelOXHIn7IFUH08lN5P7FLm>hJ*upB-&-MlRMA>KivnpgePUD;Dn@FO?f(c2L15MRV-l` zP@FBU9u|4JKej6$3Ib^jgxq&6QN&$yLUA=^G0nTLP(pV-zDaWfda&E5p>ZFa{;>>E#y1OnC{)S z?1q)DHN|S&@vk2nH(!{V6yAP^XfNT+5Oa=zzC^ zx`EneXlt7@&D7)(bk_;ERom}XWm33N7lv$160ZjM_Wk=9G%JHMN89X|nghz)h2>L+ zUTJ6NXqd_>bknJ6u#3PY`PWJw5GX9V4nr#hmbtQTNNHW-YHZravlqG>fO>H~Xl2lD zt^($Y8s+lE%m1q4L{n#x@mbG-wO&*~#GD=UmgHL{{+XNO>7X}GP|Yh8dq?8QqHn1Z zXX;nnN4C?1+$z}JlFxv*wpz(X#N9CnHMsRDoWh2d*!IrjOB#)DTjE#qO}p}he}2_# z55%5lkX10EY_C@r@0{0NwC1Oxkw1gW-Y)s_hlBm;$uMuGV5^WiOHCTe+eIbz49C(+ zDXCK^_|*KcW~v(Rfu}Fv6kjt<2or`SkoTX8vQ~OVA>&pbHh@i<{{<(BfR=L=6#RL) zO4h%8i2YTiH?hBZt|#TR@r=~IR@Et5g{DeW;7mbuD%?hw?u#zf;13sm7ify!w5C6G z9_GGv*2_9~JB(7SZJN-WY8{g4zad}a<0ydYO$kU}*wwY9trOAlu=C9zg+P#w1Vowi zE<#i6jovdRJjnU|>$(*Os}{+I7pi5Ob8ykieeL0hi-8q4K|!_}2^B!bVJMnrDIw*E zD<)@}V<3cx=hTAjHo4n_e}h>~s6oHFwHum%WqJqH8EV(S+w%d>9cp$23OsHX@^0txt{AgosrFR4rpzhu#QN zD2yQDFfv&}k^se~Ms3H$3?`s=ik=T9MVDC#Z|pARFrJzdS1WVUO!fGj@9Jj*>ij7z zJ(47P?ISA1lXGlkY6`{=5@(!;0yT$O1*F268|-VyQ;L&KcpSRBAP^MSe}05w0g(nB zMLMgD=!Dk+=k~vxhdKevt3C44!0ObP4DVWb+Ct$dV8Zn!V+MF~NH`#JbH2WDr$tod zJH0=#{?4WqI?Lc30T`>2|ly zXflrp4hMR7Hzt%Q8(3F7siIj0a{hg^H4Zd|lhKJU9RAaQ zz}UR~E(rFu#h0(r#gKNgG6Z=;v`>5~>krpsfB_E&iv!Qqn_e|}7c-)b`E!*y6*X$w z1@0Q&$DO@y9hEl5dp*RidgR9jtm$$l$zCQAE0D=fxGx&iy|uz)u2M<@z9(fL(KOLL ztkBW*FVSse>W$?Wr5CB0Jj;KPnE6x#maC0h|DK$ew#L#>Fn&Py0;J&GtUu1I#HFum z151$}E9@Dj$;uX`-Ot~^3X%^+e{1ZU#8QV~0lwg~lcBaeR}&k4KQp4wP!?bXnTDG7 z7RU6bMb-%E?%GLWMz(*raYz4VNdi#}_Wc87ZfRO+Mj9Bzu$LwB>_7DO&4_{ExV2Dm z&DGDj?v74K5a_ASp~EH;5r*lYw1Jsc4<@6{=@;AQLQC_k_>GoOZ&O&JS-8jmeXOzO z2~2h9ZfVb71q0L#XQ|#j~5zhZO!u2EVk!{o*bHZ8HxDQ@VjEuhsun~-l(6l z(#%#Jy0nvCAGh#NMrtiTOn--@A3a`5q$yv5Hbc48y&<9H#{GYW3?0WUNt?M_xL!6Z zM?(0M&>-FjVbA|3z>2NHZhOj?5O|B>ZE;s z6#$g+1y@wbw&!p~*>N3pOK=$P_AlbB_umhPsVnZxk4nhuwOf*usg#>8ippb=*TSK& z?Io8xFAY2fA~C7lX<|6Zv2pyc#FXxm#1uCClpjG*n1H*dsgYYZ-pmER_hXDy{Wvm< z25llL5vZ;XP04$_4;!oYO-S09o6csz41Y<^7(MTLn$cfE&a<_xQ@6<` zE;PKFXu;W7Ny1!L(FHT_FT@SGP24A{%1m=C%(NqT@qof{G~G;fZX1p3jD4G=wjX{G z74L-5+lgqGUPnf%;mqsLy&IYIHV-sK>^(Bw^L7h;C^e$*@=5>PZFRt9?lg`@=E(Z(H^A|uC6-HKWV>|h{~Q}UT$Y=!c74HR zp_mYdmA;%lk1CT4EV?2XV&eq0YrB>hMFneB+xP*M5ZZSjF+(5UT=8@x%pWBad`6D9 zKg&0Iik4sth+=mjaDg?TH^@X56V_hrcti?xc7$H`uyHxlkR%-9<@X2I`W1s;X7Dur z{H2(d>=td@ga`aTwuNsCF_eJZl@^uKGkoxUrd-h$-?Ev_TSII)j7vNb^W`{f8d5EV}8Ph;5U zLn1cdLO;=E67vwI*Y)_5;@zihlR#O69?fJ8TJ{2JtLn>t7hMvur+vJs*Bg}DD$a2c ziSt}@+U*TKuM3QXuwQ7`Sx3|VD~kfm=qMMy4&Zttt%YC4)A26?G!3=fBTHlVfIH~x 
z%j)swxNaW5pKdl$Un6XC$c=pp>#>SSQp7@eOA37mjmX&F8ua9wH*axSx*Dzi$W5{< zwnfXs30y#_ohQJ}53}oNK0F^><{7f;_@0}QK+#6fLF2)iGBD{2Ke$hlw^?I6qNGM_ zI=_lPcRTbZg%5Xm(X{yryn#h&i~=LUgif?>kVKyL7DNWZ)BP5$LW@vkWgnCDMsw7< zQ^xsQ3|+X8-UYxU;a236c!a&FOnH;II6vUZvzzuzQX7VBs2kScE^*7Dmaf#rJDub z_xG}p=+5hLKArnrfr%B{Kf5(^+mTE%d5ex_F|KRYOLU7mL3TIV6F8F z<5t(ne3JF-M--$wyWjblQ*O4OXc62oV*NYeMX?_B1@Z%J2Cy)DPNwyUH0jQrqn-UsI<3RUueyBfw#t1GQ zm8VY=BD2ks@UVj5@>hN_sE^b@S>5Sv6mI~!jPPgWMoDIIuowKc)J6ez9!w3g86t*_ z!@0t_sxt5(mBjuBW*#Q21HQ6A*g%x-DHJ?{vj$OSSZ7$zv^SD`_|B`DdXw zyz(LE-fJ_0Fo(YD({U76W3GbGh(QiSZm>83X%{jRrBUDIl3!3l<%Nb248gZlM30); zDMb&9k{flrp*_9j{YH<>;k$z#X_*iZ#Z*7wabPg?oC6n!TyF0}_O;w_H|7x&AxMj& zk0^n_?0)IvO(HpKJ7?besWvzdK^T2}D7C${%iUgASDOE{Sf?PlhhT#mfuCzQu;GrY zH!MYRXw(n>+h;l6vZ$@s%^S@eB1j|N@MkoX&@I7~=~+mgDNxyxx?Cz>K>X4b;NABF zunWxE$3YqSEBapeoHQ8qZ+q`@O-(Ak+t7Pqm+D#O7_ICLUdeyL5wnkOt1Wk}PP2bQ z$}NJmYyqWRC1YVBR}pbC7)Xkmr#(`qSI^Bc{aNW(#J&RrQ4YyuS&5!nAU+lh4sV{n zRK*As?;U5e#s2`7f~OdZLLz%Zq1DGt?ebf3d{X1{H_)CsJL7VvSqLdO2<0Z?BR(aK zBm`~rlavHBk7fdX3Kf|46o!s=ZwvJJUr3BY)O(aMP?A>ZoFT1cDi&nL=QL0-6g9qo zl97CNtLJA_Dmp&qzX)@#*@!cv>bVai6u)=OuJz5wMK995QFjk(mzcCXM=)PxnP+5q zjfswxGX&INJ@e(h*Zd*iutS)S@~t*r9(bIoi)M}4s9}5YH+TEyfz@G4EG}nnQBtgR zc0bT$<8bO<7|fG$8!x{DIx`sw6cy5nW}q@xa1t6QLAlsKS8GaPRsB;dq~ zeMBJC;*4sX=zwB-ig~Yz=UE;Pkx#dr_5;3vdrHhMmyyq{F5C4cBOj4j-$#U}{1Rra zt*YI)_;llbbmhCw>!+kW9p8{@322v?#}ZSsU4A@f1Ry2NlNL7?BzZv9l-A~n(_wuD zOnE5m`p30=FC93V=AC3qy5^UZ=20d0q=KkNs~-F(BSkO;s! z7CEq5zv}}xIFz`_!Ae*(cL5sZn%R^VU?+0baY^lP%L7a6criY`S&n}T;$B*HfEV=Ad(i4VD=UYlU72L@>WV5LPc!6>748uA+!QDS<#yuwn1rg{7{P8FB&%#okp zO@of;cwvG84m6KInGzGz;!R|uABN-*Jl(de2HS9Z>eN^TxRU{-fRU&ZlzBa8&6QUu zab+c3`-M+4#bXAhnUsX055v&6;(YUqY9ncXccrc_20Dk zi%3bqNyWZYo3k=b2Z#zTg{k@CCl2?gV*jW?VkSki9Gab}o}`OH*l2A@X})g@7@&G* zKy+*0B%=6lv{MRIeM+q{;v*48kPK;puS^{I@tgN!fiY6k9LK8DY)@kyS|sD=vxW5_ z`5W`2t2naPLAA+PlkKF#Xk7=dTG|*L0f1YeNm!C*XzhL7JO0G&BWe(83r`qznxz_0*9b2@9 zPjQ-Z%73&zwvXG1hJW~UQvGkdGFVt_xG})#HDPN95OHu(u_0I43-U6|x1VlwJA-*7 zK%khO!+$;_1ZYb1>W|VQ671M1SrHzS?<{m{pI5*ELD5(q&XdU07IG+Lr*R`nKzR*6 z>H}vw{vrB{c75l5mt3@qj*5+zlKv^tIqAr1IjuwiNnk<*%5b}Ro5vAhL{^lu0Y?31 zsNKZ_L6$=!roAXBvX|iN%RrabZr9@SV&)wx@(+gmQ#ZaOU*f7=je(bs$7|sSu5(T8 z(Ijr#mHEAtsIA5uVzoyx0;;qe&V4HT^1H3DYM+$nkjvbh>J-)Y4)zc$SD zH`jmchfCtT*|z=M?3F0y3MKK8`y8cEe8RgUqArx zGFWuP<9y5~>YI7^t|n8zL}f^U*%z~a`)<(q@$$6;^S^nCN2cWkxZl3aceO4Dgz7&G z(!!$O?IQvYfMX#=weQ^8>}}~j*SY1CIsm@ZthIVa^!TS45>=`>OQ$bO!e~tb=|>JP z>Rl(f9>=1T-rm$QBGgE3)KXF+wEYB9E2|03EYu1st5-QsS8Y*QJkm*Upir$HeJmw! 
zRjkj))#V-bM?VY~G=q>+Z`KV~CNWNF$CX*;5&9R{1-hd^ugoUXQ9j^gkmpeUUhPeN zvRkhx+1+(bs#vHfgb}&hHzyh+|CnhfD`x+@H~8DTDDpIgfIe!7!>`9Tl?NRru!!~3 z%o%kOGEYOh{iAzOTXH9cVMynqSSw486e4jn$r^*B)!8g*KHUzXpoExg zt`T#vEITFr=dYB1=N`V+mUA<+bnZYcBZbbWr$`AUk}AGT5$|YWL||30PfSN(Ew7BF#|qoN*VUHH^fcCBDzk~~ z$`Bk3p2g4*=;R$@VTV?mAG^d+BPkuT|4vfY`YCZ2Bp0E;J2tY70cgovKgs{#+$ZW7 zd=K*}az8?iJsT^!3V8M)q>W!6n>=LU9`Jyx&$rBTH6G{VqO$kHNAEA&71(^csW|)4 zU*sdiDx4!I^Sy_Wt#fBTkxZg@rOj3O*hW9gh~FN4$oCjS45V!}WDH zbYcnj>o$lVvA;VWW6MhV08B?2za(pG5Cz%}9X@J?me7ejPc1wW`%( z!WAsbl^aOgOkY>+0!aUKKVjl+$+wZxU1WG%@S`1tNf zw$xMPgP-h6b=n&35TCD?>n&+%?%$f528X-l#Lnf;pnpMZNw8!3^l5aG%w*#?vl>3h zxTvsE4aEYh=W#?Y0gW4eO~FGwyd0{Ahy31;BMxV%DB0!&Arb9gvKj)m+m^?1Pvwd~ zuc@T~0#Zq_^z5IH<*~IL8Hj4bkTr*8Q|xY!PclY$q*Elc$MIOVXr&c+N?C85=TOUz z^r3qq#}&F2Io~Pndk_sLMal_0+`$MfT<$Mmkkz_5d6H&nle4MJ1I?%HYGWAY2#&Cg%+o@oH1 zkR;z_2D=;w;b%W!XNbQ_4-fm(`dU+6v^$YvM*WK+*Cr;T+g;x=h zPuQ=EOcN8KrbDwP0|LeRLo!)X=IrO8+uufo?oc3_%yd4_Z~H=-hzGMb9kjV2sedjV z`F$k;W1)+D*M0uFxX!`bh>!W|%R-|n^%(M67txXRn?{2xEbfg5O_fZV{0YOgtHJJJ z)9`ouPZAr$qkWy8+K5aP-XRq_!UI{4l2@AzozrzC_pV_?-cEwZ_ zNGwH7-`7oNXPyf(`a0t2B1hGLw#*h#8zmKfRY!zL>twnY=B){{Mps@8Vl3-zs5SSH zJ1qYwnHS8Ip6s@mdVkV}o`BZfyz>+G-7+A7trgR$C@x#QNJ#GG)*05WC{0K2c#{3Y zBOVvqhVGKrg?7}mcbKkW2wZIzP`K3?}^XJ=tffyK{oWtv6c zTeu2Y=MfMjOOE26NYz*oX0c6~H6`dJX!B$9ukFV#1A9DCAP~*Iv<2dTN9}toI{eev z4Td?Mc&_NYTF$*FM+v&$HOaqkk|2n7h}-qc&gY?0jvC_DwC42kU%R(J(_n z2Cey%@6y#;9{#&WIGdNz$jz;Md_qP688tsxaEa-#f3Am`g*KP0YTvG!z_F-iAZ+lx zGC6rY_y%Kid^_8r(lSK)H`2G9RN>KSf;eg^T_IcP7)SoDSfEQ2Ke8xoMsBW- z>gTpK|Fj4;W|B_-7r=Aff9J``g}2lSvw{Gl%Wp?q%q--WaNV64gQN^1zx{|^cvE!Rn)S>4qpZHU z2-Wah!M#k}WP)h0ULxfGYx8So*-`VEpt3jxujv4$r0*{_dGEX4o7A@a9W@3B(Xe=BJCCubY;$f|Efn`T zjbnj=KIb9yd4D_1inrG&h)%NRv*1wOLz^dODJ;qfcCe91(0pR_z%`*=={C%i$-wzr zHesJimOK^->i5kZTj<4oSTGZ-LGN802AoZ5$>>YT8FEd+J?Y!V7$&)k+*RHW7*zO7G%k#=u+jD zc>gl3er-Z`W3oQ|E+>*G=HKa>g?&rh{`JWrlVZ6#AuJ3dQx#V2m*TVB{9CO#q`+%* zi-u&^1hWO+3iHCi&=Kx5P5SDzXw2RQnfRc{hd!U7`CX^1HQ(+wt3V;Wnb;={zI-X4 zqB^XLpxp>YY$!c7zFz;U-M*=hH)nyf=(@SNKLv=a2Vx#ZNHxiuJLF^HrklCML7#ip zKmT`lU_SIMM^l}^kngCo7#*@++M6K&n}oK!1BhXmbRc3fFin4A~ozq+IVXTM6Ku*9%>k#YvLA6et{WXu&szU}7 zvjA$AZyxPLMn;ax4`Dt-Y?%1{FCT~@%r-7=$ilhsYt-_l{eE_4O9hOTqV7J%@(w4?Egb`EQuv(USTr7>-YXkFn`+kANrU zRJ0jjdf&XDXO`|26BvRwXTr~@C;DBM-;bUBFH|Pk59xi#m)Lk zYBmRlp=E1381)h38(KYuhXuwocVP_g+Ay2P@%K) zsBoJ|@5zQvIk!#f}5 z0D(V#U|S&6NB(zMJ4B_$Hx{qX81D5LsxUDF)w~8$iQgX$@zdAG0I!sqw6HL$__@*v z@jV4MLSC~|Sva!)k`}v|=r{V4=S(;PPHma%)nCvdm^P%C2#;k&I^vX&BmAqW79{ZX z+mk%I$LIa^U{c96$Srky%eQc3rU}q1YW)$*O(6?5X0p4N^b-&l8JD%KNJd6xvuCqb znSqX?>#ysm&`k(0nbw3Feh*eT_xrzO)oG+o4;Y*VvUf={K<{Hx$>4%Emi+JDos^Pxh~5$^G3N&6nNd2TR*v717+=m zo0B}_9GK1?9t=NuFzVG}%b!|jhY2br4t&Gwc=(WhZ>qlYkq4g8L;WLy7DUU+r;Tbo25kXxT=wZ z9=~Kp{qz^eM_7pDG-gn^f7D_gKZM}p1e$|K#*~MjE zsHQoVIq6J9!m1nQwslr}8-q?+82?6Q!+fh}OY+SRP}137?&A>>kMVrLZ?x&g9(MHlw3F09@tZcfC{R!$=i=(C=7VxSJlH zVIm@rI?U)8XPJMAA|OiKZR2&SW5$`q`hBLZ?C~P?N<;%UWN}xKR9c20)UIEI9Dq0L zetyYFeXo>s$T8_)jXVpv&86s&wT=w__KIonPLei%aWDfjNND-F9lTC(mcSM`uIkwN zU5|xZ<1ivGPW|<~xDQc@0uyDWt0(4J6e}M>n7LDj7pss$17Gzqa@)#% z{EiTJ)BS*<){LU`0R^6PoE57Tmt^0|2qP(EJ(Np4M1ad4eJi1k&Z%HD&k;*AFeHSS zaAqDPw2jWQ9J~*-()nX>foCI3hgNevJy@xM)dZ98&j4}=MQ1`=O^u)TSl8N{z$Pyx z;)as1T38iOw>J22c>CxyozC+D8u@+6JR!PJ40*x3E<{SE~H z$biU9bi{h{@Vb4>>++jguwYn4> z3X%$Odfeu%vsy&P1&Y z4WKqhT$Prmx0T(m1a97_7}n82A)U&Us(~SonS7#IiWwnC?QvILDVT6Yv$fjB`pw>1 zfCz_#z8GnE$PxVwJORRxbiagc;(f>#^NwL6t9ds78Nsu?42R*A(UJY}iN_x;&X~rL zbayq9xc1at4H8@^xqM`@5&)%4u}Qk1yEmD!Eg0uIJdsKF>?KE81G%^U6WzCz@6xpx znV1n|B8r!$o(GL1U%_{w3Dv_T;kc$u$XOMC&G6}Jps0s$kQWl;ebBnTjQ3vY`nA6##1QotUJwc& z?^Nr_xMGH@u=Tk?S%-ogU9GA`rqHT 
z_w%MD4X!FUj-q&f&Z7TIUZ}eUQ76tnCI=iPcte=Yz7nlmh}}bWXBksIV`=WIBoJ^- z^V^39$huSu07>G#+T>5dC^e@V z0L3$KnsOv-^GwwGW+-VNrHxijp{Dr#WI?J>#(Q=l5vk$*$Da#kgN;etRywLx!$X0P z=++4b#=35Y_JgeNH5JUU_E#(;z!SWcLT$Asa4f55G)SEr`8*3xEczi~>5NrOTC zLBB}CQ|sHNjQa@(#5CkH7K;+;6P^0~Ua2P(d9yV|Aw*_mxmez>cwR}_#CY0>R(+RV zmZ3vn2x9k1VFmE>J?wDtxt=}!)nsI>kX!cfsJf!(h}V1nAxms~ zhVN6d@|w4U{Xx0z;7p2v^k)%n`z;t}MV*%l=F-qT3yP5K5|U!nV^93nPt@hkobFIM~W)YvSf4qg)!qz5DPcdL)mhV&HEjCK%`T{OY4E97I#7tZ& zdIc?s^n5qlEAqlR$`rF9;a1{UkZ6BIIuEc8G{CxslgRsH`zyb z?!E)47{%-T>>tqWqq;_)3%F1o?ofW#-uB~v?r@0Lsx~%Rx6~p^Bq_D0Dn>v+2tupz z2&M7%F5}2%tEO2)b68i~5|0jROk%To>@~kn@xhNkutMNs-j*1!-{DC|m^;u)UVph) zR~IlV#vHu z4(%(wNvjU~W!D->4ZZb;eCkqoDR@wN#tg*$HznGoAlpCT!vz!R|C&EktmOAK?`yUW zD^@DeFbGXi**tL%LK2Qtt8C*|tyzw&h?$=pp?+BW;FL7l;TTpTy=L(^xn_@P6aN9= zasMT2>d5#W<6Mknk$*GZh}%)tfUb{At%->-?#XUOdj)i|h71*$m}Yko-#+9u>)ett zOD*$yy25Fpa!%;PdK@H$Xw(>M@AKD$l~XVB`FnXQk3}zU#WM!+$XQGL2zKLPMtaN! zn?TxjroU9}Gl%ae@Km*N>*^Tg*neqC^OUHac02<~^q4_Fk5pOjD{G43wxzy?RCxk@ zrW>%ytF5g-%x&Q#mA4^CNSTmfpj=*lM_Y6Dscfyb9>BN_P(D(>E(%S%Uqlj2s^!=a z?b(x1$b1q6p}PBwuI& z3Y(bt1ro?#EFC;+n<&X98LvT4+~g1GJst*psQD@%`4Lhq zTzIv03Q5UQCkKsuN}B9-Qxh(ZCuPA9(kND7x^{+*f$-PlaKv{*W9^njR_3EVhtDn` zCbIEXAO{Z{njsK2)Mt;2@amOPP|!Ld@us#qmH#QmNe0zZySw`&hTD>>YwFevoxva!>&Mk1i$sBJ=@WvF6Y(REb%_?w=ktef$7#Rfv^^FHkJ#&6hg^XLJ32I7JFNTvq3%!LNzG}vsFE3gB8%-t@9$~BxMoGpGx zy?uYDP?Qoo9|p^n+$f;VwDaN7oOO(K=6OF?c_RB?dWzp=-p;lY{Sd8@MnRkmlM3}H z8XvMsj&Oqs#B1dF@TCTKmnGk7nV#ga10qe@@hb6{lti&B-z^j+)Sh~-=|HJijM%rZ zJ;%@aMEJO%224E~D-K6~`jkw6B|~8u#m|Z9AyOeXq~OqXj0SxBc}eDfABb|#>B$~1 zfBcIVW2Wb3idkC^8o1z`o>C`MwoA$G*Dj!(9gQqL+|~PqutoDvC!1Z|ERBWT0jq53(=wZFgP*p;HBJlDK&!JaDC4Zf1fd{O6V!56(}}G5L%I@4O}G459NzZ{Z$oBGV)?^tQ+(j`Uy*?&oy9A&tIoppC2WK7JW3Ko0bg zX7`T^E;DE<{C=`*G{Yg+$2kOk1k8~(7cf!=j}8vr-QBz+DoeK0Cy?!44{1()Yny`G zqiW)Pupsn@S1uL-ZIv}k)qqdx!>0pgiib#taXH7h)^%*u)S2Q!#JFE@S7PPHP*z?1C{T0k)KM0lPeb9D zBiW^w%kX0sI6rd6i#+c36vJ~qJZ*G5F?daE>p4h#F^zxkNTDg;cY}-FOe601GA*Ph zw9SD;@0CnyPGEiB1NwYjkZwC&*IG^^bpxW$E2gI2`L3)1T#mmpeK1*}g;)g-J7P>z z_{h-9r|~fE1dOkdWwF(sJK`mNMvD8q9SO{@eqlKMjb}syow|0vf$K2hj$ZeZWaa*BYT-G7asELL(S_e9ERabqHV9{iLMi3C7)^h~ zmzrd5sr&OEu1HKd#5vTXX&v&QzjLaikTCBCH^t!SV>77mhKGV4EK6@@nt7Z@T$fw} zL!4z1>!`hYbYsATBl=o5ER5%1R!ZP)3mi1To8=6vZp48-5DZir72Fk2lb+>gxy>d6 z0x_8hN0}a5K%#%5kkBzd$4$7AQz=N?j@6vrJMbe5S`#n*JL1qh-|Zgfg|98&{~lw6 zFUyoGebavf|Frr-IUMyf4ylYpNe7o6hQxVM=cVwLl&SVtktdv-2$u}KBQezDlRq^f zF?sNdXpzo!67$vQURH27h(wcS*=#>CD6_WW)9%hMd`!HQ2PQ7bR+&Fy@a}KLpD#=< zB(?z7pj*ngX72A>fy& zim6HcJ&Qn2WGj^nPn(=hnWJu_Po8Kn=cU1^vo&(JU%BQ&rdBVN@HC-&ncvwA3hBS6 z^ru*QIt#k%yBJrN&({{d`RKp`OuFZ`#>wjshh6WcR9})hHH8tU)H4g{lwB(82O+_? 
z1sW6XS?=cXv=i4bf)^cd4WWxH5X$5; zm8IBVeHt$(8YQmMBfa@aqsuzh!5tM}1%RDv2jAa;h9O-U&~L=r_S)lGBs3fn5uX4s zOi^M(#Ni1I&ZlkN=1xSNl}teu8*Iyw4wpDU{%*RVMhcR1~H z2b1e#ce7ktm+Md81Z0Fe-oaBE7I5zgr-H)Z0=X;-Ys++Fc3Xb8v=ZqcfFi=|)GG4- zqIernszD%KLw(73nyJMb)OnqBhsXi|jiAcV@h~-(o7gkUvzmtO6`5S$d)|`w7v<*< z>i{O%e++93(quBN`u_MhrV49y*@7w_F+g|oC|R8Dd6NQXgcY0AtA5Z_YJJT?|FSNP zoIgDU|8FpSqziaa0xe5e%trSeMTypcxM##@BQpUB6+0jyK2LgF@PMwevR-vi^?i?N zUcNjGzWMyGRx8 zp=H}6oEWAL|HcEbLK=Z3Gf8o)9r4CFB$VZA)gmr7$C`Lupga;IdOFpYDw{-`!l-ECAjwgrZ)CfVjI>fH zkSziGHhC3sk$h0>(Jnh9@oPdz)afuivt2JcPbY z+n#b+sH716wXFo_ALs@KCg)rtHck2^Iy#|YQHkT62Dh_uPyiOfnKeJ zA=S*Yq6E`a+F6Ip=;4fw$-46DgOEVJUjKUBrTp^ZhMJEg$sIso04{X{)_?-Ac^qFHU9lhmGZ> z+hKQ=&C2jzA!re7{m6M}IOf+xh#L`7S)pt)oSDE}H5nnWPi`0-jeN?s8B9EYjg5%% z>@q^cSAk1iqX~uMqCxxJpSJr*uQM^)7jpZWB`rwhba!j)Ql0(bU$karn*!up5ZQ~c zksrSw@tcVgk-Gkt|H`S&?n5Ckl(9ubtE!#3z^RjFpQ>9A-`nfRFYxduvHj(8{(}b* zt(pkWzM5*#c`cc&fti(p{WYw_JhGn86#fe)9mMI2hI%A5wpsfGA0*OXqWllE*CFsc z>&T`HlV{Cu;=xbZk;-4Z_-z|VxnwJG*R=7@^yx7g z7h${_Q@s8ve{&r>QW-*QW~$q6zgf33wAzN* zdqAKw-Z@dUt6Qr3IEKa3^C!)tE_WF4PCol{L~)prnz#7D%Pulb1r>>j^f~q8$-v!^ zEU#SC>IxzXj6LvaE9d}w%s~q$weu`vkyiU3vFE;4RZma=GK}h8^|%KwdwiS-sPpIx z1K;+d?fC_;1PP@<@9B0xVnPDw9=-vR;7zMwuA43@P6^&a9aTEN=cA3^*~?kgmSn6H z=a_YeALP;^cdW*DbGd2Iz<~S3mb?b14j$R?IP!X7fv^d!MC+bS1DW{>gLr?1(C{j#sA5R!sPH(L`izT)Z+MFwsx3dqo zyN?oJ?|i)Xg$Iaa@QPZ z@njDy+FH;^oBQtR)UdH4{o%BR{|L4geLq@NGPc1O?tA6bNB%CjXLnh3;8XDMG2FKJ zVAuL}U^m==#Ll`+F!;~LD%*SyjQRw-AyzAMUAt}@**=OX3T%&Th;dUHjGi$lVU}Gi zBv2~^bihVi4i4fAqfC~Io58W& zvA8)UDw?H)cjav+f+t>ui9`{KCuf4_Tt^@v6S$Yu2|DgD$?3Px}?J)>A*7 z-1m08d~dHmiuiaLVv?(-JKV54M5JX<<&N%OFwpM6^b=mz{C$|1h$e?u`UUH)m+5_< z55AS|hE**$jq+jn6UN3ZxR{J=(#4f1FY|GG9cZ<9?kBSvHY%;PqhKT9F{q?AHw(xs zE2oJC+%vM9wCCI8%YLF5;jD2Yo!6oi>8yy`c%-Ni6UzYDD+lYh<8gxS|L~-c)Fs01 zJq9Rg*;gT_BVt5Y+TT&LdNhTn_WN7bK4~>0iD_+e0&L&662uhg>$`L!TYDv>OC(BjfM&9OYI_4VCMX*oRA!m_tv+9>@BOR+!3Wyw4#@ zDPE`2XCU`RX>3FA+eh_TNc5+?^|6SdA?vNQyLw)>jUid>_>QLeu@K?xosBBCde0jX z5<-5pQuZt;GP~14sQP`R$f^eEXAR1MD7GHF&IN~wd>XxS_Hd-~VToiPl18VL@7-Zdm(}Y1kMsQt z*(;As0WK9JOmapX92{IqN_O39lcXV4D)|@CQ;Ez3DYxr1+{rtyeHfdR^O~ZjhS^B$ zTUdj4s)$dZED>s6dk4qR6jk~zTi^EOj%C)U$0LmTn@L5Ml4|_IWo{gns^ZNWPQ9mQ zvXPIchD<+35+K7fav6bFvamr3`>sv1<3%$k=Yly3wAgFX?l;4(Nb5GO6V4)pkHbh?wUq1G~aC2gGW z`)XIF)@aD=9_K*Gs%aH6sGW+n?z(32Q;3n?3hYy`wee~IZrZ244yz5{-Xt94_YYRS z)ud~8$aL&%W+Yy4YOPfZAl%gLKX3>vEOh~Bz?8CcGN_blJk;hxv{lt|vEoDZr>oyg z5A&0X*JnUfJ){`rqvd#OFLQnvrIn;5^Khl4G(+c0j~MNPUA2K129j94(-vs@zn>2c zKdk;&NpTX3+OeLuk)F4IZ|!xf(y0Eni5j_ooO<^p{~6!eKYpWK7V63!a8+}RM*bkb z{<3e1m2;8Ty=R7b_k3a4F(OA7tnT;pJEHyh)<~FvMq9mQ^dZWm_Do`lBa^0h0joV` z=ZT-6H(-0R!ohvd+Mu=V*t+HUF*KRkZffc<3~4Kh>dj~ojgV`Pe*rW&MzjD6uRo63 zxRKEVvIO2DfEqycbw&XOF3(q=3x|a(u^XYlIolClR8g;Z;2u}9UD%WSXEypU`mPNe-=vJ4!dv@zJZ#fCR z@FW0u=E1wH8@O@=4a&>kGY>jouv*J2?$Q)oiZblYj3EU8DWs#YammW>x}c1XB>Y0g zlEi7Np(@&_?z0Nr-(Y|WGy;=_ax2a}_#`+ca|q(gS@6`xNp`JmX6Gb32;`OpO#BJ3 z8#$QxA={Ug=^gwFcGy_Lk+Gp0&wo}aNdMA!hrkIunRVp^c&hy)lY)5P^Z-4#Hl^pz z{^!f{Xr}uc;~+P6#TzI=sI3G$KIcs{HI~UbDzSfiOA$N_5D48FX+u6g==1Qs0f;2o zbq1tRo?P4RyMw+4Pr;vYz+VM=@~dGSxnz#*cxoGT5=4fsUi#N(#W}6Lo_PGlN8~X~A{SMQgKePAQCN`va^n)k80;DBA8eJ6Yifdn-98n> zUVi6AiC?`3_{bx_wA{u6%(>CO#-O+1A$MfQBJE5U>65j%*fDMNih2)`3Id*g@z@0Za`LxIou2JQ<6oW1;vl+dh^YsJ*~kd}2eISOIVW!ZD|V#uvV7LqJHo z`L8~^J>9(p`q+$(LeeFL_i%B%?#JEej#?@q<|uE%Uv?*5D!Z~6vuoI=WAK8cyCuD< zlDQ(IB|F22`-0Zd*#?G{B9sbMs&Aet z%}PQf(LD*U1pXo*QlxqR^R_Q);av)#kgc<9yhod@?Q>? 
zU>gt3bz%p&3bhdF6>QQdVz9+um?TN#LriEpE!5FUsto+<(nwKNS-V8~&-$G|4PdQ!g*bkcS-z9eM=Drl>>pW@yTUBAX$qDuDt}pN5e}qx;kEc(B>{S zi&rLn1(35*aZ#VQLK1?@mh#J$O!?`3;xT*{w*LWEo4aF*^BhbZJ&3P}qH;)DzCB_WDf~K?LQ3{`wW4IkVvr@gokeVn%2~P31uQI z)&M3^lDO9i3kwCn5K8w&-uUW7UIhE=ax$iJs*w4d^`E~cq-`W%i?IQ^^6GUjkF>v? zEe6`x@PlthUIHI#`Z+$P0XKN;eg>_?=E)u=(BA3T)vO^uTE)at>{fBl?P|oarbCU~VK;nLhXCbJrdW$&PeQu2+~h!}qXsWE;`4#BB^oqeDe|$fQ(sDq_(aa!yts|rN5`ld8cD0fz))2a41Gz0`(QI9?%o73PM z;-4nZ>^EO$UGy)qD~oWk*ESpuQvqgGr>v|5zNqV}ZT1YI0mC;;Ix>WsYJ#I(2n3ld zbN#2B%a4SAef@=|j6+tv6MK44bP7NSumV^a^_#u;`o4Bqm90E|Xrb>$T3I}-p3;2l z&+H2w6%6w)Q1Hi>s}YNDiT@VWhhO$~foYznFWu2exK0@o2a0im1eIS0HJid~x9>5E zlgjam(-Yq$$THEeB9AmZEp+v{uv%;YvYsb zfnYd7u5acEl}Mv5vvxHj%K}?Q_RY+;=YhQ4T73(vaf?9W^V9LSc=Fd8SwC#)4ZWR{ z(ke0($8^|=?ey?kUj9_)*RFMVXUvvsYtFkEF%WTlwO0_DBH#ZLI~9_Ll_!#lNg+HO zy@;bxVgTsd`fwl!2@+7)+o=dnE2S#tHT9(1%+517eGj>`s@D7 z*5a9EeZbzer)uA)^W7&0{U0_Zfy3(h4Y6T%BSSICbTGoqN{ooWMw zqsD=|{{he0%T&!lz~f-p`Jg|2Y%T0~tc42z#rg++y^b+h>(5Eq@-^=Ab^6>ur!oQ= zmy?6S;LT zDu_x{{_)JL?d;oGZ$eddHgruc!P9ZCt*&-H*YBP?>H`}SrAPoLI)89888Y)6@X4*+t2Q_x;+ETI1?n=|(&0j^c)@3v$6qOsKFirHI zQ(_{tds5(8Ee;8~ro1_rS>pYr(uaKe_Mfn@xxM|K@(;z<=z8+2bNN?4h`yJ<`=yL1 zW(h*@pP}sN_!@Z>Y(`iMenR&FvHMi}uyWw5&N5PBy@tU-Y)u3Guj#rKZgl;ws5k0t z+UN1}O!gP6a&6bh=O3#(9Z>xjM2gAgD_*ju$W1%u+TpM@{R4GX8$C_=@K#7o`L-rx zW^_%=NFjmZ$N?*t{*+3b6g;Bv4yUe|dei!qO*|oa+jyrcxaX_Z_WF9F%tV_%JQKKWty~Mx>+R-gx`v~EzDXz59 zVIprqoO1e)IrS&s(G&*Iv~cG;C2Lnu{_r>YA$^^0S*9-^V;r(TfR!t1-tB*VVQiIY z!nzj=XC@tP_#IGdBK~sTa7nVNTt5E|pY>g@>gbomb!iiorPvZLV4q$KYkTL^L}D_5 zDP!p&VcXNRFiJ+IiV!N`evVl_dnQqD4UPxirvN%FN7Z zA4=e<9+iv%B;BX~ax4_>aRMP5A@?)73h;-L|@qcIT%>BqW<~;H4XYaLs3p$D+0)au*&jRg*v4ZG8 zsHN+}=QXHeaMR1kobAvlG`jkp#aw@4-VDHnCg<910G3=T7cl_1URmSt{=SgtwWBps z?~?|Rt7S}hYvQ(LX{;q{H{V5B+@^BmgjHG$oR}a~yCS3p-Oq8l^w!zI2-16yKkzDu zQf?!|_X@<2vlF3nvN+@&7fA{{)WeEl+05tB1g=EWp9i1bsrFT!ZG6?Vvr7psmS)Sg z_p$tN3UgX_HxEqI>Tx!i!>#d0tB3TNE5M@4QLm7@Exoh@$Vmam+aVLI)MhIyRj% z4wwo|NN4z=T=i;`a)=!p&FbxEmrD0v00=G4<;!GlGkWTbdek{Cw$G{^-nK^9Js9@^ z{)dhC6EWxc+D_M3s;NJ{Z)|{UFaIE*Gmp#!xHQbUD~Oq7{oh&vAfY>n>p7ty17H9iD96UjrAHH22bk}Ea7zJ2oGrZAZ=08*#GxWPqynLr`Z_Z{v40?!nJwv1l|NKm=p}MB`u%K6OT@U ziALHn0PJ3ofdY^bXyivh--&K{NLGi>1>3-ykogAJs}bxGK^(Y9X;V&X|RbDB${P~ry}et`3ehMh!2p}AQOoaLXfB;WZL=@4Yi02OoVeKAFZ}b zQu@mxtRh0?wJ(leFwceHJOwy;!^UF>_dsbQc%y8WIM1Jgp>YEe95&3_~z6W&((iIeK{Fn zbFxHIaJQttXgB`-;~^V3=V4aeA)Y=4+bpcM<=l`w6)|0iW7L0F&EagJed6i!@nl=V z)Ku~mObSQY-FagLMMiEz;p<%Wj;=$WB6bl6GzDF`)c;0TTgCGoR9$_0_{+LMvd4@% zhz+Wgeqs14%rYDJ11^gT`|L~D*syd&mb)m_V1@TLQT6oTjzpiAX89V#9B3}1#${y- z{T2sQW)|XyaMm?J}Ya@ zSvVWpjfrqyM~yt_m)QCLDOYl{oI)1)pVoKl`V)~G58Qj z|GL1CGsE%!ouE4p;)9Qigc}Mj)wfzc0yY0_v0-*_eIN3VS9w0MsH4Ymx3u^oq`szb zp~$Jv+WHh=T~C*S159rjB#o0u!f~~qFuI~*5W>1YOJbYK5MQ`IZ~)$_*v?>M^k&Wd zl#FvLAkzmzt;kbW(4XVm^fmd%@LnMT)&qRNYHBo-kGUfQ0vcB|#u_bC4Qxl)e`AXJ zSwKH>lCR|3qs{4aoA=xU!y;W=<1N*0QeUSe9#~@R5AU0)u(q+mba#0CtelI*qA?(} zZvOOL2iDI4Jv=N<@98?sDpLY`cl@zg*EuMTN}U0Q!&N<2+k zIlXYuz2jNA<3UV>1-IM+uZ41g)S-?rf1ag48CSn#9?JTTECXJ}s>=w(X#+pf-z7_B z_LIm@e-ge}sfv$IBxBqd&es385bL2ftqs67zqsWD9Hcip3NGDCY4%AC&47UvGCv8V zR%;o;Pp~TJvGQ_AeG5rmlrS4Y*nZMPKN|1MkHLMq#$$l(NhRmVM2|RxL0bhHh1GPZ zp7?{`Boy~@m7u321~4`sRIn#!A{D=qPl!bI^^OBqu@^+Dd!ym@&8TXlSp+&fJOio{ zn+vIQknNkW!&ORf654+PlL?Uq)}KC5Va|adwBs>v_N+SuS8kN|G{*cZnXzy#!UUur z;s3fhNMk-t?f1~dAS5CbYbmXvIAK0EGOg8^WUm3Q5I39?hws=s<_3pxnaS1V z?Ib>tkv_ZdJMPR+kvcxqG`R*EhnfpVD=9%Qx?lYV{>GPozQ98>i-r4m`;At`uzSXe zr&oD>8wHlkvdvKJ_u2u|RN}d*j)L4kkE|K4P%87pSRpYh2k8*Su#_16I28Z!EIdNv zPVoB%SAaX(c9TKZ+~+JI#T@qvo31|~=6LA;zR91IHp^#cfS#jJW>C+x&=WtN8r7q-htq@&OUh66(l5vec6W+A`0g*7{Lm9 
z9DpcPf;`rrz*e_LIAV9^o(!$lEEhI`bWW+@=%7)`5+y1?OECPN#3G#u z&*WtJccja+{J+EG|I}5Vvuu(^Y}x+5$H{1+p}IT!WTPuxmY<+z#NHYYJFFd&;c>T3AOj``^TPUik0w9&%Xdm)J3(Qn1QqF&KORst?xd901&JqE#Tf8;8ZI+T`u%W%9*RcFMav1deB zx7b4)EpXAZgyx0(zHhr`@$O@Hyc=uPSd-E-lVFke*77;luLzF|Hl3B8JE23z{NxRT*>?$&>I*nZ#N&vR}J4=6Qq+@4h8d> z25BNefiOTS>cPX%b=1o* zUp%0YhqFWSXXRBs-BMZ|F;SE#OUlwVLfVU)4<8{e&paxf*>>rm(BPWapOE~$%V0x@ zGzWK_8}uOt9E14>xi&c+QF#utSK2UxT1P`D3XyAVg zxFsp_^QDFFBOYew7C768`1c~H8Zrlt7fvmAMB>bH3Id~Ew7su(!PuTPhuJ)`=aDcO z?1lwPP8X|ZHO0}bD~7Zc&vhe|Y^w4V_F*N}-obrJTyxD|V7=OFG)Cl1tO;2U$Cwo8 z1-lO_epwi;UnSSPb1Wa$Afv5S@3R>9y4bN!W;4y>G@Dl-4w{ZVeJ3&0+$zE8(s{R) zh9168d!8_ij(A*U617TM?Oik*p7p*}L_oypDurOB6kQm5?eL)8854J_)~!3V2JRF#4*-vsEPlZ1|4-I zB%qEk^!ab`D*7nD|7$k0Aq9_D_t7?k?68VS^xky=WCQi88`H2Om}-1#Rz>=gl(^7A z{q(%+q6(t8b0)9gdK)@CzMI>`%zV|v0uet0K-JFy{gC&BxzA?Zt)PQGFjZr4Y0vRR zX$(Cy2&mGd@zd`ce>)Hw2l35dy>lLsVr+ni-<5VV@+} zc%)?-=bcxTjBFa6sSO#1^g&dHFj(pr&eqefR0f;E4C@}hW)8zE&gRFp0#5TKIt{se z`|Tn7+k^ZIAA$4GhoN{zY8o2xAS`m0T#2Xzug5#7X8Y}}ll9(~=lNPwoL8?Xe*gZ> zVL8X9S#O>%;Cdy7h)xI=4aNirK^1g4ZsI??MBuji4ow$CqEgw^5-PNFx&@Vb3CJ@T zHly5pRD8UjZke3lAzbgO_yPkh)yT}UbQN&ZD!BFgru!=23S1G{$y?;XQ(CR}%^R4K zc=jVNR)Tee6&%hCci_(K_I;YD&zT1oTLJPQU!lzY9w4v>=0ywhcr~48kj^nd)AIet z@v)8IXg0LFgxi5-QJaY;Bx6>#CBoVYK#ho`+1~u+wt&%_>K5$TF#1xKwCXbmT~E!V zBVw!O4EX5e(+)HAST=}s@*`~Z-*c*as9u=%=})gHakM@T0r6BWL(|z!+LD_H*jx5* zTN8~L7R$kbKYE(gpxy-3lOV#32xGaXIOVzi5W-0L=?v{U5 z6&s)DPMyaUtbMGF_vslz@#n^hAANm4D+J#gU(6{gm8!`~HM^+A!`@>af6wsD*;+qu z-4}U;BjU3T_z_9Ma;>`@T^C#oWbP6qx4Hyr(^h$B0&uOWwb#;4!tmCC)SzY$MU3^ExYD$|y=q{yZqvFwkF6>Bvb zD6qU{Z*pXFX2jEei-*!6SIi65P|D*xQ1}EmNUWAK6McLPYb*fO5-RINn`w-Ar_qb1 zM*H+H25=nTyV(?8rttHQ?@me{*F|7NV;Pq|Qnwd0en)A&mn`+V%=Zd5=eQh%{PUV_ zpyE#h>i;FMxyO4mVY=a#$|g8!4zu44gxN)#%ey?pu?w&_4=_J`h4aZ@BjAHnzKG4Q z317K)Q`-Ogz{waIuMQq}hb4~Xdj?;{v^huZEWB0CGV7}PiKJJe*KDwloP*F7Kn@~9U8I< z+a3?p;h<~Ce7IYzl~ptp>KWG1O~0k@9yDGa^Gko?z{^g+0U<>CwB`6(WCVx1KtxZtvmT+CMnZ? 
zt?OOtXwhu4YK#P6zJcwF9eEQz2fS7>^BkMiI7#ST8Y~XhAer*Dk&A^vl83$+d7d)3 zfM_um>mo5_gi(69#5y_3nNA^VoZ4!Ro8T7)m|U6x)NR1hVV=j2R1%>xZEnbfr%aDg z*>oNiwdYrSm$bZCEMy{LXwRzq0Yx^YmU;B%J7n3n+vJN!q8{YZCm3El?nnIep{m+t z?*^P6PuwtuWcHZZ%bjSMck~IC(DQ4Ub)_Y?G3U?xb;{&4Lh~wE+>0n#)TvtphX$Ew z8moiDd)^MaLpQhYc0}8h_fcGnGAtMa!7bl5h}KzsV_t(q_VJ$jj|OIqPkcf?bBU2c zf7`{4P!VY{cmvHt`KAZ{F<7gQS?lP3Xz=6?fE)F5rf^W*}7(OG0{uv;|ylnTRi}xzU*_%Cex&C<~0|z)n;Z-MwG!rlWNen{gXe8e#b^+9+-qg$8_(5#-g^$G$#R>iNl$6-8$xhK6<(YJRku z1B~l2+FSMZb&iIt=hQ}F=-E@>{n4f{=(*83bH!~J^`O53RL=k zXLyqvoWhxwc$obY#dQ5*F8{rl*Tidp&fMRIZ{24}(I7$#=mp5tM?2rj2raqY%n$Lg z$=eAqd1zR~uNHTQzN0%XUu|pHnFyFszr5A}}jjgM8DR4b?SV?{X3(ed9xKD8Yuy&oa!* zr;&19k}3sKESGWxx+SnxALNG|_5{$YqE%Wp^d{^i9n#T8=SXt%L;*4K*Qfrrie;aI zi7YEwDZuZr>)jSqwf1T)3Xhx|++^jFt*emh-FE`V>vJIF!sSOaW=oaGP z#HV?iCQiMXG}-Yi5@8`+(>2loZL}JgkFXu~NnMDaW`qS9XEXGoKeJZu8IWzkl^6_S zc^rc2Lf92Kv1=jbS=tzv6KGb)!U#iUNO89y)sC3AdGjQO4_}o%3_~)z(Vv$^FRybe zed4+~)^@^vm87(sU1858Kg>GU=NK2wbUIdMiXb*N-2IfYU&SKk=pZXRiWyh6SfT^iBPyVFKg zEhKqT!d4?9d0yTrk2$fPHYU$3HHgE=K$y?Xx$sR(hmMxBIR&FL3$j4f*yvCoQeob8 zmn4eCZ!tAX4a#3LEC~f8DT5DzC}%sHjKxDr;#$rDE2*dHgrN6B@YJ6w6!dgK!92Uofg)s{8RO<=e(@L)U9&}D!N z#hfis&+YPyzfSQSzAZQ+DmVzDNk4?&bvIPIj@Ns-uVs;D7v@A$Q9X!nWD$Dph#jDe zxq_c)L=d-WRDJH<`K5oY2` z0=k%j7Eqna~t5s#>UndhK_B-QM#ZS!ZueO5ZTCO_Z$k-z3yqh>`L{&8r_6LV>> zZgU{&lZr~*a2lWT)6>1pOPH?F=+gYz!O^F9=5~*JZSe>)`T;qznd{C8lFMQNH%E2u zBW>1AnuvrZv(2rpyPwH7pC+IN%4|?m8N|V*~8Y zMn_4gBw+B<{UIxdft0)_My*b@nkF?`_<{a-T7^=lEQYEe8#eowlUUEHWqqAPsX~3` zJpI-bjzcJv&G&WXVo3_R-jcFG;^lCZBNl3bG zNZTFJ_pv9%f-3(Opd#;rPB%p${h@VMEGBs9faL<;gc8UYjuL-plCj!bvEqMa#HSfqRc;0>dD)qO~`C@vLv zU~IgQFefvBPZeSlxwQP90(jkK(P63RY9IHhq;5saAH4zt)uk;Pu%R@PeIH!x6ywRU z(U&H%R+eV);(&J-B64)p*!Bo5e%$JTL9(LvRsof*Z^q`iOnB+GMjMKJl^p{{fcxMJcH7u^b*5e{Ol2|zN$%)5G;$00)ZSm; z?$eq$u4rA4jJqpCva$7FYTDf=m|Cna<^-R1pI7D@NF(F^kBcjio#lcvN5*xItjX zDBq%NPoJ*9*6c@`lSuu=k7#-<2HLsSCVAKUrFQDPVm#dMjR+b5FW= zSj|p!gT{9O`n0D*yrU&@U)5`!>3|p&^hGb=s@o?R-UWCZfcGX|TVcEOns{3DP$=Tv zFz(|3;0bPidw=uOqVpJTfW4A)0FSonLsTf3QwYqwRMhSovC(QaaL4fsP3s7BZKAN=;L(x=&8iF>F!wg|t&Z4RA;SFP=;YJAgm}Xl;Z<9e5-6&CM*LD_Et+ zNR>8;Cvwbr*U@D5f4dc#e>FReanLJQgj12P7PtyytuD=Uc~0ML-hpM zPU;!$o;jtHam>D7k(=6e)n}xB84C5zZRMr8e-+zyVN(zOYZ>p^IK$|VFb{2QKF2wr zRjkapw)H-B7)14QkY4l(`R1$KV$p{g`Hr448&dPb(a_%N5V$*{N4hOjeGwq&H%Rz{ zh_Ozfn`LZlbg^i3=3HWiR6+es$TH^qNj)CgC^AZLW&2J57Orn83;ez8K*>z^O_`j$akTFtOrZ@^wi;P5$d&wDM3f~f zfQ>=RLBK&$<<-rnB&$BqC*{p}ALIxOnEZ!F<0tm^=VP1BqF2I|--Hk9%jnS}Wrj|K zy4=|gO|v<_>WTOfepd+}UlXkKiRl1f8yqH2(8DK%HP<<=mUa`++|@2Tlh1z>2Yj~M zlVtaYbCK;44@H@kj1CW`$`wI^B^5naQe>8lXxRQ!JoSywtgMXmJ_Eib2xO#+C}AZC zad;ZEtreAZy4Xt+QiolpK#B#=+PmA%IqSbR6n4+>o*@x&-#m))dT~|bv1h6~!as9M z(v-a^3_c+*63rSM+Yr>K4pS!Z@+qf|ANG`I$f zdyWc4o?7PH^28F3Y7g^~0@TTVCC z(CXcnew}vsE!jCAgXYNsHg?{^U6k)vXvcuScfPkPSbq#V2DCkHa_Cfl#;jQ2SICsu z*mVzuO}5|c#BvFBJ&@t(11J@(%SzY8pb2A13u?+aVS(%@fOF44xJpPU9Fnk1{PV0< zPv+7aK150?3OVBNtdyg#5z4@2fB;>zsoLM5ocFRycFY9XnE`zQ8Zgk2^D=lDI9rW; z(RAQfd?e$)!cf;k)?JJo!x%jDRjFQHT7}&by)7~J6YD;EH<>>>H>y-E&UagSLC#_6 zGzNT|JWC0fW{bfNS94Mb!XFq7pte#->^7?|g0p4mh8%CO(1`eIM1ycL5nU9bL-h~S zfG;xv7%w~r1cBU4zI_i$ZHH~_412nf%C{l)f*02S zZ0g{c`-8#y?dg7d;^4;Aw;6zN1gGKYs&<#Yp`~wgLp)!R?GKk#S(xpW>P|y5*#uKS zMnuQX*eZ3AGs@v*dTjY>0TTfZpA&0k-ZDK5G~wW%p|c{7pZAb~8S9Mx;Z z+ER8#kQp5D!#(c>(pdLBsub_FhyzB5;m05Lywxs`C zXc=W)zc9I5S!+bybN62%H0Cnj5`T0x4Wbh;zb8l~8FW#ayTVPIoN>o0Iih(c1|vYR zA6*rIFw__zJctoMa0)r?gUzNIB4hB{biXp`xb>#O3uTbomN#?0bLpb?2#1_ML}D z92f29qx}cxvb|V6+?tNIY+ZOm^*zgjSdBXGk&xi-+)|X8@nx&-QuWYGJ!zcLnauOq zGT*#bP$J~WP12u@cx`U`1z?@BOV8?Csa`_ny@zR10U}W~lCYu_Flx>EL1xUDsC4P* 
zrQSJ+*^tMKBcN+(M@o+k2oO;5-)2F2UKGS`m}kV9B9btuB*44>MOu~@fbbfZ5eRnf z!{v=6tOa^D-5-#OPNb@{awyiE+c1%2LeRc0PgDeP_4Ubt4q1~MICPivsmR+XHzSe3Ws1ol3lcj4C2pgPLY8VZ?* z^P}ocW5&&sO1--i3PDm%{{HwlaIHY%!&Qj$)z1BF==<4XW+#JPx4YslkEN;v|FunS zsHY@G2o?iNO=jln()h`1?86VbRf2df%*0r!z#vx^@}{G8`ObLzbyF9K(!0c8!>G}y z(P*0SmjPLX>=r&NGRkTZF$h1E%Lgc~bJ{|IvxATPz&KlhEz|3xNF|JFC>a9VKvFyj zI92=xZhoTp_)*09fXn?x^#_NGpb}kUPq8hC4z*xP!1@wx%!0p@whM%HJH5c8YS3c2 zqU(WY?=75Uuszq4nzFQTwGR?JRlurPcpQ4%tA;na0$;uqM@4DAFoh6VFmi1h8#8r( z0SLDli`_LSL{Gl;>cOMnv@6@za3y)&?MUqfUcrjjSkyX!T|!f$$a{f@scZ1ENXML>4rVe<+uiw68ktjJHNXaNd7mPw7CF)!8( z05?9K$)ufIQ>j)M^Yvg>tDl*$BoqO^d_L#{Xv4Q8;sLYR7iYC|&B{PQdH?(7FI5&? zsy88KxwMyY0Qs&|gnH5Cxe>pw8j%gnl+WzN2`hDZ^C&pO$|wx_6<1{#gPyVU$tLpS zeW3-tyuHhCUsUpM`RmPAg_eg^4(oM=_7l;4T4%F430lKRXd% zz4N4_QE5p1(ip<_mTBcM{5XN4eTs`EJI> zJrzQ}RzNd(_xDJR3Pjt>O&r7FxTC{5y^Npt#_n{#_(E~4zGsFm*s|3GAe)ZQ*7`|W zJ&pfcmX9rVbyh+N1YNir7D1bRCSY%!=K=f_8aB)Wut01@N~Jks1J^M^xvf%_ zb1b1Nxh+FOn+}e`^n`h8C#{O2Nn?ObCwn0JYMu5?P419HcOZxGKPlPQ9Y9!6<)xVo zzLDrKK!-pk{?eN>{5;-T2Fr^CeP9^|d%zY@)AXfW^DAXF8I?hwHo1I&?^ZX>~u!|2{7rEaNHg><+3=AynDzfslpP^gSaoJGF0S2RKQ6JiGUs%|g8ci|(wcUB! z-D$4o(~X>Z2ZnDx3SDFR1DD1_97^I>7yh2Q<914nb$}M*^Lf|)+y|X6r#_OrJQMt1 z0BgQ2oy|gf!bR$AcgOP<)NH=7W6E5Seh5>C(!k@w!v zVU&@*U#)#xK06GcF7MoS%EFHu*a7u|!D0PJhVsUd)i?G*7Guiou1BveEKk~f>Y&wA z4V7Ssz}?;Exy)%ANC+EHnH|qb8p>@b#$nREy^B1mINb#3JNa_4#3?1;r}<940*&0R zO6<%J!XSNQGzS^F^=MMq4Zu>Cyb<;1MNcwLk>>3&b!SXrM=`i-aF&vZjR5${r)RNI zUcoJtz=D*c#n&B;2jf=E2iYwqUD}JJ+za-qw97NV-08r06-pA!;ClUY-j=sYSGMa44VyITorb8!c{cL6 zbnnO=C9wz#GB@h!s3IV9<+JCa#&do*9TX))meX?|5S&i?{l0RdPUg?N1X8^WqY;9A zSJZRc>{ty(3Z`rD&EsRf(lW;cZ%#qL$C>Y;jzfd1+un)K4;SPBelm`XNO)E&}GtTRoj>p7qC#D zr1(Q>My_AfCr|2`>9zfcA4!>rYXL~@7*Uo>k#41Qv?VlEaR|5eETyS>Hw4*iI={G# zmsPxA7bolJ&^W-PEjKxH^PWKBUIP`f@TdIcCpcUGkvHY01JTchg&t}W4%_XISnKXnHjS;jB*PCPU9JH6`cO85H0djv5}jl^TUnM=KBQ$! zT>zp1I3(p3uB>S`6D2IHU-!pH6ZUTf)}bOJdj$uqIrehb`x^_~%ES@y&Ze9%*q;8B z!sr23PXEUlnsPlQNR{QZ5Px0(%J8~ekVcp!%QRc;|du39bvs#|nI^5P%x@yrKYrTJd3{=7}FS}ip`d4r9a zKlGH};rAmM8x0%Jq-A>n->p0tK>Ai+2bFqr4}ihSg)^ z<0^0EQbvG9BpXy|BAOcr6u*RdkKiuG^1T_db(pluH&ba1lg*Jd9%g}z+^`tg==)5- zfV?Q1p%ZQ0{rq++%J(eQa<|a?9smQSsDCu?KXLUm>Tggi`NC$BP(*#!v$qY5<%e(< z_zV4Dx>s8m&y5x@^J|W0+^9i-$26~u>Cd0o#_MT+fVI;3_h{E!*d1#v_+s=t|B0Ne zt(lrfS6fL5!GSc%Uae88w93;-*MCgj(|#COBs}Y+QIE}Os{d&1DxYo3ZR;knurw-P z0Ofm|vY%7!5vYByln$a~45BDLk{?-AIhYZb? 
z>8v_w;34gn2Q4}8Yi>!$$yQU^#m*Yiv1LL!kJ_M{O{7hj@ELAi!aLf=zOT0^_+#|7 zKXa}Qlu^uOP!zj6`j84ft+Fq3M3cd7Qoj39^rKg$>jqH1AIR!A+;{uYvn%|2%~*Fd zSw7alfP{wNrOh^YhY)<>eH47S&SG(kD56`taBinDy5#Ay3{Gb45hP975-xDDvu`=u zRvH{}Uf4Au-kU1lUl!`+9&d zg2K|MPA2@6er_G!huO>-OBDj1?VV2*77#~~63)}!gu1{PfE@t#@+m@%*gZp|V`vEa z5k+@L0aj4EY4k?JQwKYMOF6^Cu zAU5O_>7Uf)ICaAhBMCBM184zwY?va{BBA{NC8;jl1R%?(!Wt6Nj#|70%Mc?GBpE7b zfm2Uoi4e;M&!O`#DGe)FNwOP47K@pmt3oAcjSkbgVpPJ}O@GZ;gi!;~D=w=ZQINsf z%HiSJA5S0Vz*iV`-u1ETTsX#aijgQqxGW|7FN&vmXZPF;+$hAwUHp&Zc0#B8%8OY5 zks2f-{>jy2iXZr`tMso&rrlPXt~QCKc^2#K0GEbP#?{S{&f&il7zRO+TC10x%qAgh z?<`T!1f~Tecnp?hl+4h`rp28CVMB)o6!1NOB|c83i}mQ{ahX^+Pe_PNGS$mbb$Yvh zF1@LnNyf_zG>s6pL{ouWl##jj4*m9(f=-&k_Pf`VH^ad;HoqKMcaUFxbEc$OjM8ER zJi169K^Su{{5dNi9SMjddl2+&eQBn<3h=}n{o)`@Wqj%?l;D( z=~ih_$bq_;wij(}SCNbVl&4!G5kVPwe!SA-Ld~Ws6y8OoGz_|K)tiT@g&#h&S>Jjz z^5nI|(i8DjO4kXh8&x=>r0tr89@bOy>zg3>w6XBR<|&oIj}vtf3VB9-SXWi>GTu__ z7O0LIi`y47H=aU~_-h@j!=p9?_&$xL4#X@Z)Nvqp$YSyTHZ&AUH4<#v^|GS}QOgFK zuMCMd6OK}Nd5sY6TxgD&LgxBmgL^QP;5B$_U!q=u0MLj1Np!7uG52F zw8O;sOt3L*kKbx>voyc>;vhbe5=#fclJ-(a1BnnTb~FQX46tAJP1jSgUA{yj7)wX3 z8jn_<;iA%U&W-th6q=r-T9QlaK$ z*6y33pw6L(?D$I;42s_!dT1|X)Szo$DT|<&m>#43;^O41*HkVA-?A;)b<}*CAQYoH z@4{M=E@Br%C}881i&((i;O>xCj>NyuZ^{}9iz(eQ5e1GEK7S#;n><$kY})~BKv~F1 z;9^JFgV?d6&i2a-b@S(o{~?oM={8I=De|Ah7|9ADZ`#ffT;T3*cemvb_KHN<~cMy3j0{v!L?7yTegcJ*eNsamj z9XKF2%eP~h6TSA)5XP5L8X4c$B|&r0^?JX4G)e=3davp6EYwXye|XosQncIGLS%^5 zj8K~vV25iZ@gT)@_@bJXX?0%hjAN~?{Ek1yJJ#^~w6p7yI=^E}jj~w5`umuHlLqG3 zm`tL$)gfJS)y~8lKwu}5;%r8jjud!!_~Q{bPfo*8YK})YDznkKO-6%H!(it!0bqGZ zFO^Uvk7|yC^YL+h<4_LM`BO=Jr@{2-vz)v;b5cYsmzh(EJ1;kwv@!a<6m;}FT_?8hUa}G zEC!~c@>u&pA;Kv_7{w2Bh;qQyhlor(=4W+wuy(@CrfJ(0;r)jj6_FUTx7stkeSKJ=Uk2x}nt!A{47QUJr->@ekLqbd1mL6$!%)CP zV3M4|k3)WMW2wrNJ&8IEs_OQM3aa}6l){VfPW6NBLs8&kzSXe_AiE;k=W0-9d%F1! 
z8GmH{`>xM4su{24ObJTs^eR~VsJxh`5_ed%KY|}T-;abqO&L*fLNkdCwZPPx;aP63 zF}j>Z#PKg!ba^LolzOtNaAbTfKglVY@vHu8hGwaIp$T%%7s1c)-~6CXqnZ<9+&{=9 z`j|(vp<3?7WycFRrNxWSnB17so^o{;nJjsPyninBP%;rkzn2BPoP3^kw?m@@56!Yz zKgc^0XdH9!f@gn>G7SB;^#E#HOY%M%IeebYRy%HM)tPIs=Q1)&}jFrvfWXylJ#XtNMHZ!sR>u*RFrkvUV$hUMYut_h&K&w)ws* zbu|CctxBiqGFv4(+3yA}L6773>;CnzKS3R|wj^^nOG~ivTPFq+NwMOGjpa%rMY*+f z|GnE-A;Sf<4un~T1)5z;01m#R>7kI8TleL9(iz0yiH&O82pm-uxc~8H+`{qH!PR9< zhk&w)F_`5}aVJ5VcHxl~>i1h0%HHEq2o4XHjub<_)SA%L{JC)q-65_*dwpY4ehUhl zSkry?oSH0nIF5Y+RO38Ji4LkRky>9zWNMFgFr5_(gRy(X2_QzG;nzK4y%@K>ZVDRv zR>WlT%X$>Co-)Jpwf8?%{8Z>yBpa zmf^`Hz(-DBMa%rLKJ>)2nc=}pOZ?)39~Yp(x8JNdlXbfS=aMM(p~gaq8?=Sin+X-n z`^zd8VA~-5w=p-SbS+OWTTaHNBPxafb`zQdarMq|=6_IPc!c@%a0AO1fRf$oFmLVysQy`x&GL7LyUu-tU` zn0E;2t8f_5Hm;>K%9a_3Z8_aQ_{ ziYS&1OSZHQLYrqRY5J+9m76Cd*ULU(+mWaH%h9BHtkE$NX0z!^^D`+MGxqQMKIkVj zTivfumvMuwUMt7V*uKfIpOVXYWxudsmv4OLhF8IYNW|ldnCVpI8qv&@t1zQtpD4FhKG|ls{1Z0l6QX|`w-zK2CgyGWFT>N+pA>}m`_^xCV^D8j9R_a9d zAAg$b@cICJJp#H!b&vsdQHWJxj2Dbh2YZ~RRj-s?hX^M_lO)G#)7?&icsx`biukdm z-cdw(&P8be=9<6y>CUaz`b@`lyAQ+l=-!-&z0`4~KyK9tg$RDX+FO`*yh5+{SCL+` zOObu#oW)D0l6x|v48r|{sXTymi)0ESk6}m?LPQ29=jmuLGAc)K`y&Q%Hkm8Nf56c3 z>(CUNPu&D>mAjFzj>xYHLSh%m-g|hPtWFx^1U}tlA1=@`If-4EQhu2*>WVC%wTnU- z8p8%mjCaJ5XAViLqNDhz5d7%%)nWN>`8T16=ES%P%b`NQI9%oA#(xOTp6XL` zlK9p4INA$TcHcvCYI5_;nk`;4!C&JnWiysng52)vAq@90NaJUAvR_y~lwzsfqGMBa z*vBD*4lUXqj{ME+zdj)Z(3h4I`v|EBxec+aiTbF>*OO&!)l2Ux+^`v& zORxK@!ZHiQ9jyE>aP9Vl-A)6&UoF4L%l0o(lK2Fz=Q|AB(7AgkEh_82_PeMR+L*iU z9-2pm%*|mCmw4591%k=oL6j;L29lE#+m?CM6WtPV!2|XW)dt2{iE40NT}2hs3nQ!h zPi;Ew(s!2*xtt^a|L1>dcpU*;kC}`a3_=n7>wE&`U-D=9!NC~&SxEn?U+3sT4G?w( z>QNBwP;^zN8>G6KI)8CCjG(JvIZTC)tNnDI&6TFb5-xPfvsPy3*h0X?NWi6C_cGc0 zDx<1dD^+p0*Fw>TS3`#6tpGu8cp3cmd2bx|^T=Kjb9AFgL}Q}VeBE4P!{+IB1PcAj zrDj%gE8jn(DiD6yU+Wt-#D)<%YnF7f4G6ZtP^`>wz_o;j`(4ibWS1;sKdN?QK04@l znzR)Hf1np8{tGv6@%dfdd#fmhJwnW5q3i%R<$64B(4tRT94(tGJh$Eg;*1>td?2%2 z0TYYqT3W(b>v_(ihkK2UlDZhxegw=VT1t-64~6d`EaQVuk{a{_T=ucGsqS7)rE3Kt zQ2yWh5R#8tK+dg3#U75V5HT%ZVr)M%ytqG%d>9dI2d3iD*UnwKB(XabBdw{PIlf1H zk+R~m5~0%acZjfFs>mk#Q{JwwON~ces79vNuAm?@dVXVhY5UUo4<{zW`!#+g@-B_u zMaBOE0F(Cz>ilwxI+$#i8;DrTCDK=DwkedKt54bk;9i_8F@)>1$jGe6jY7ZEVWqAg z+vt%P+Gu9NTWbvA1%DHlqWb^*zlH-Z1$*fvu{+JN^fuGBK@itX9r~?sBDN>J|Emu!F0nf z*<0zV)jLS4wb>lxCNfVF009Ztx?^}UW{yvUxIhIq*I1V4!~^Lo?ljzEIgcXT?`Ix22KFgMPwsl5r@&Jcik z8IroDx&VK6IL6w0in6N~4Z#7vY>UIXQj|w*!u?BfCQ^6i1_W|53Il+9JCqG^$YqidVmiY+&5BDKrw8`~xk7_Dw zd0MCN4W(CrI~A!^B-2^30WO0-3JH)0CvH54m(~r-{H@swm?(CQINx|`=0wp5{+)Tq#0{a1gG^k*L!f?)-`Q3GQ)WTcR3&M;fKQ~pr5Tpb7FtaHpgV*<`j^OrLZtnNdaITT%PNcL$60Rc>4}kaVL7DJ zlldR-wnxBcBFI2sSrW;=(vljycS2!l^YJHDd3|F$Vkseu`pw!~R!|2$zZ_a4LelPD&Ae#{Sydl3&DlyGS4q>@whxz+ALO z(Pd_(l`Y}o#UI@XqvYA#biwR-_U-ED)CkePyjO98CIh=Fw8Ul^_D1oa59RpfeMvbi z!!lP9dQz|bT{7ngG47)5_jZ^nVCoa&o{$ap(+`q`5QZL*W@y){F6AWBJ_W|^w< zWYC+y27%Q5UF?uw6dGX}&$J=-tYl`yl#8>RCzEJF55(V;mR-W#6qiJn#&=wQ>TCEZ zH`*tvnRYN;oS{v`HFB6w^sC18q_Z#{ih#zWt9xr@W5JruB*M8q9rYL(c7=*O_wnGT zR4V6pxvz?A)oK2_dpoVQXRnH&IWh!9k4JK1 zxQw>odIAxe_2xOO8XagcTg96i=ZNeO_*Hd)k+6vZpa$nRJ7bHOVN)owt>3hN(R_OG zt58qyxs>q8B9DQpI5f}c77ZDHszDwE_o0AOi$sy#e*FaFFc(iL{_7_#0`F&LtZ@!n z=J#BHQ;vE7zdRx)`g%L~YoXQUB=KI@HEBV5I5!#;`ujwQfykP8x*<0H^;^n=#vx0$ ztb``F)fY8{FuOpEFFEEid}UdwS_rj*QCyg6huL*)5M_(Y?_BrQVw1-(zl^71gK=QR zORSE9F=#QU?cJ-yDEj#i-(>YwLTL1a*aAxu5<-{-GRP`kC4Ku#{N1p3B?|C;2|&U| zke?}70W=ZBSaNGMfB@|Dl%?L$swH?YU1fZ$R*OfInsMiA17Ky8!+~qdrAs(O>O?X< z|G2ZkR0^t;BB6w=U2`mR3%hfBRlv>!i}C~Zyr|Okw)8YlKlO7yQP&aw6&t4O^eMX2 zB@o2-lYr<*p|Yn?^~)4#+8IF54A+Tx19FT2DkMK4!v8?K=#XM;fkRbR=huI|wl9tg zZ7C3e4MUy4XQ-+nVZJ|#i5t6Lgy_WaFvlm@7)X^XlN`5oG?@v-*x 
zH_)LHy%xW7cNV@ronr+f);^!m`n39*s6K%mIXmF38ob;-COWj&?6{-Vfne!)vwxPI zPVMPY^4wZIlHKxEvRJ(0*Y`58Uin4wkYw2u!aU=sH=il!sd({n;taDmVXvMMFEDUL zFEsWQHo>?5XleAP>>$;rQ2+`cb0Nc!mzt5@7uMyzr>YO_Fwmzq)b2Si6rDM@kV=6FWq57g%s0*7j?%>+bj(YgG&_ zqy%}2<3Z`5Txv&$g^yd@4Zn3)kdX*WThjO9COe+h;c*{k{JAh=Uw-ws`{?fz=GqFh z3EPtCy5F@gsr03r5R=#S*XNwYCfBUX(pN5}%rmFkn2(ENXvnh04X3k^JPNNEDTGo& zE2#w$zzW)5upw%tTUu^wcs4=A24NW>wpq^cJ!MU{JmTb2SiOlCB>!2~{PJB{35%Fo zXf{WN99edD*X?w;Ve3-*1hdn+WH8OK5R{t)pVO4*>TU<;9DS@})B&AtCv?Qp!ZY#z zVWFxM!LP2aURCd&Kq}Ls?Bd~SK_qyx9UtfXl5IYCD?uFED9Y4|u{6hR|HzA-74c_& zE(7t~e#QrV!Hm@}SDd*ZED*?_&LjRdA@-NyJ4fq& zFwojCfJv_+k*?n@vEQ7(*RY2Ypcv0AcQtbON$X2aI7Q4V{AhMbF1Zg?jVCIi=xP0jwtX{_}spVLG084 zBA4mQ$UOu_5AZZN56BJ~Bq$kbF&L~Ue#%u@CCEH|U^3_)tuVad{4!RiPm`gaV&AG0 zHsQa%716%*?Z%rlUPr?W5$cvSOktgjVm69lAJ_`AmJhirfh}#Lqf1FdMKb*lZ`lGm zKr_(pj<*Q79+TwqSkZEQ&^dkpIphX@DO5zHcytB5mnz|EipLv^ATWdh=_gi^BxVqJ1^|TH~P)5tbYuE0qGe|PLIj8ri z^QTxlEgTlZ%MUT~0D`K}Hx_jvmiWl+zfCw9P>sjY1&b5hOA1fi#j8Vsu>;`%*}!Wm z21Q7O>zK>mc9JQCMqrH8FMkZ=Dp=&)VEE$FBaJ=7w}N!WawUWwD9EusvysG9IB6GuV9Qj z(uHwO+uO8rTQ2!B>bidWN^=vAhgry}gEY=hbLKgvY;Pc9d)=X^L>%2z0@$c1F#O)9 zgB-Id47rIZ>#E;bw7l@ldZFXNfP-z853Pe6&DI624n7ZR5y{mVDYCnmE2-s~sK_*K zTm#4}ZDO@?F>zm!iTuKnp{#rC@(kO~idV>h1`3ZK?dS`C_14q-C{FLA=rhO%D*&N^ zAo)0^v!Q?y8B~-YbcIoWJb0Q`R9YZnxx!MjJX8oBfQJU8*-2bBr?^-^QLVuGsyiHr zk(jdW^A_yYcmLbbnwyd+K@I+NKvE>X8jFf^hobx2gtCPCUAK z8xNP4IGy>6?{I)frMwng2*@YzFE@~z<4>0SW+CUM3>)5;3Q*-}(mx9X5J^6y3)2T3 z_h8XHr<~bGx!Tu>xkwa6O5(eoNZ<2kVj_^h=gtNpS^V9qyiZ9CP8B%Og#AH9?*c2z ztIBxp6cp6852X=6T?nHzLF$zDqpZM+FIgwj5BC$Z9nmEgp$A_O$M6uF70uC;i!EIC zW&N}`2qs!AQ-}}oTh@ke^}g(LJD3}M&-n5a=q^Ytr-V)wCDUP}6__qysJk8izB#cv zoc(Ewgh6g~lZ}D5T8yQFmb_3`#TzJJyb)MtQ2k=PT%2}azc8NX92xT$Ij`-+)@XWC zn>QguQT8){DR*M%Ei1tnMlM!AB%|m%2`NmH0#kwoG9Rz-v@UYDIP4f`2Z07b1tf}h-|6={-B?7+ zw8lf#1+R*~FeuifQoD};mBwx^;E%dT%!_8gDgj={UOYK%mpLUCM*sP!)ziK+<9+?7 zG7AXyE)K0D{=ftb%Q5s+w1|TW;uEZ7`(hu3>CudqA74pu#5Vq&pL`W_lpGD5`%k|$ z0}q1z=lL;)D?kK=Kl=+A$Cd?Ur{5L5c{B zI16*+dwqjsO33FMd-^Dsf>TELaWqlRivjmrSE~ z#`j=MFk#v3PX=T8>WHh&;7#!m8gMjIGBE~rO&DxeA1rVJ!FX5KQ`HnmEPE<7zDyV#Hrm=taPUS>JbP1} zq`w)6;!nBZIU&ubqz~8f`Iz+zONnho3hjM+jZ=~%Bl`J0A_3Q6N?I=kLn!hg2ks5N z1C>}-x|14`*Trg`NLV7rL9Rlsf>}6r@=Di#Ci}Xzx>g}t9r^)DeY9L zQM0N0BmsBEXhq6avX&MH#gtB7ua;JTfp!E_#I?zSP2Pz2{V%Ke7`!rJ9BD+b{&xkY zGa&?!0v|Fn?Kr(4;G!9sEc+G*u;kJ`%Skg&YL*gKmu5|fVqbq1jzoH zue<)9v2@D1W4Y$z?t37XpM2#!dOI)16j6Zx$Hzgy&6fU~znn-lzVRp=EH-osv)ueQ zrh_GZdFJ+=>1$L8--FGm?UW;sku35`TMuj6(4!*K%2;4Po#KE7q&C{Bo(oM)M^SxF zPP~PM#ePlyHAkgSlgZvb3%Z|ryLx0yUGi!s`0kBzeAIkv4(~{(P0b>~wf(l_m@o9Z zaIknJV_{jZ)0{}DL>YZUwnI)jB-L;%LRZv#S7@BUKe7;oa_355MaWJtpou>u3x{Ufg8RV6((9jV{j^}zMNMRvaFb*Yy| zZ??^S9Vsnx+r=sJjzj7+g=+I0sPcL#ELb4WVpoPaowqgYhr>@&`8l%hFITZ@JZd3a z@aL4`Y8%7TkhHTgVgp_d$vfE(O#G!T$2$zuxf1TBa;GO}NZ)ia8@Ic6U^ZjRf9s*`37E0ugx>WENER(%gZvf}*-=}i!S%Y}3)Q@|$310lYA>}F;f3O8A z7I6_-2EbqTGsyvAhRoNoB8gw87LSLtxl!Uplb>kVQ%{!;jmTxNo_`?Z9nKx`y-Hsq z_kcjfPwAyzU@we>9SEe}59Pt?hmU^&vEFHZDglPl-_{oFH!`l8y$h!ADzW$x{PV#o$5%Wvqnd4&^yxD*H)Bb~iRCiZWgVuJY z>A?zh5+lP${DMG(tx*$_=-QwZj~tcB*cIs&@8$Iy%aIN1WJxVY-Dou^TI{?x0x!$o zPaTPxoCacRC#vp*4k&1o*)ohAPeu=dOHDK zrYX^5pX@sX+OKw^0UuOOdAaNy*pkwD2K1VJ2KD@Wn5>Y$#VIDr;)RN8j_3OAeI0>G zn{Y5v3vOv=wS_d1^dJ@T7-!J~LB^?ZH0BG}mo>&TyS&|_&eW-rt^X9P!eM8gv69=O zGiwFh&p>Y$Y*NGTFUrf5x*|4neA9W3!qb6s#Zwsu=YO_Zg$I!@IzueO`W&+N#P$%E zD_Ilj$fh!Z2elY@=PJkV{^f!M`uD$u%0(Q+Xy)j3;0uMVwewoMgacdk8eps_ya4<} zMZtckaET@XO+k9B8@iBWFM63!eE**EXa{BcH7RBJwi>nNi(+%63-eZAen5b@TDFBd zp)6(br>ZU#e2QT9By+4b?#-0miL-ds(+c#CwUWW^3^m+t__t_f#nR+2g!VLW6Ph72 z=;4bVsQE2o!PT|GDxjK&@A+JrN3f%~a4O7VnDuz?T^Ld6FP$8#jh4=wS-*h1yo$L@ 
z3*@KMk(zkQczz;XI=M8(JWMUa>PqleSTbZDJ^P+dhdlv%2N}pX8kkXe`hvvA^-wRA za!a^LteWI~8lB92_QgbmuqxvEen$~3Lu~jOY1wHY1*2e~HlD&AJbdp-b`E`;^JZ$Z*JMvc~_) zY;b&t_%BgMoOQQUH)6QP8vyDqKFXr;i?s(&x1|4O{TUA76$=321^4xoufvxTw#L=_ zit{lZ^M35$YEVopBQ!5dW?z53%7Q3C1a3$2B{-W;f46a(ePTuj6DWIF8LX=Yd$G7_)Z!jK}!Vu^UW8rJvYLRcm}^bCsx5}RS?3$v7* zXkS``5E)EIb}iG&gcCYXg{}1&pumd@c8QBGYKdh_WbhdlQ zLEI{RU9y|>p5EThTV)AF;PYCv)|!N{XuA=wElBY#V{9i@JwfXpG@smCm=);q>G~hc z0Rco5fv3)VOW47zR)$2uf)d;pv5U70uS)= z!V;)4vqyZ2BB~OP>mUC=V%&KDU6=vE%{E&WG3|I;ULGtW6?7}Xb0d&FU<9~EK?pfo za^kUf1jI3|_CLc@%Bx%F{x{koR&@FA`?ncWgEgqKXtF{tcX3!a2p;FjV20JDzPc$M5<_6 zCt`B-{$~C${gt(k1kcJJ0X%btcoy&Rd_@G%`L5DeV?0V3Ef%I6F1V<^%usuN)7wPR zuvb?ca{fK;8d`03TKKtYI|G*lwS<3~p|p@cnIDk`zc~rW`NM1lU*U|V{57)c=>&)Z z1dkLMQGlR}iRPl-NgL!7maiw^)>_~y2E@S-1#@j@-;4>T#N#Y$%T=r?3h4M{ZLbHFq7ZiX}R$VBQ|mD>ar=%*@{t_dZ|f~nS63R zqAl32p{EFM^fOytcTmck#rtuWDTM=%S%W(=j-1<)oD8*@?(AD-7~Vb>a^BGDjuap0 zI?*l%hy|Q$Z&T8TDYi7jMpGb!RurQp4TPfebC}t%MM12VYQ(FLuq3F^FTFdX*v`5UcgiRgza4S8*=n&C@bdKRiIVQv~ zL~DYS0Wd~4*)`0hxCUa^b}FdaIeG?-$@0AR@gZn@403mR#*0(a$pMXwI*LGeJl6&e z73h0Vhd226WHLkW&}^R<;Q|KUL^fSN{^OYZ+Q$KO?TOypG6l2HdTeZ!ay{&6YxP<% zYR&l-AwRuW8yZL}j9Jw~xptpEBzFCqT$w2mvqO8+*7Ai#31hFM>Ph64mO7`d?;M?JrskE`q!Pzz$={yJ{1R}L6EFfxbMvLJ z-}&Faq&+EAV3oiI)*TIK7Ma?(zLn$JIV6Y#=QN+6Lk=XuDt#}%#&D>H$|b*!0-A*y z6Y(UNdS!mhQA}h491zQIi%@}O^!#ZjY`(uyEWZ*!2B@A;?mPB&utWpdY>6t`2_h?x zIluk*!0oy}$`NRQ1>LO0uPNkMC82B++VJJ(3VrVrb<^N zs}4g-2idjR_``%;%KRHtdK+`<{smd_@>SCyuxy2yOVFN#YzslcFx<(f1Z=Vs!()Vu zK$b?+4_fS6@r{xtxK*r21HLA;&jme}f4+)qVw2d^4#$nS1z;LB>bqwf=s`lJ0V?YTfE$qT3pPyx!(1N-zy%&pwtRkKZ6cT<5&Sx zQau#dE@h{4V`KG(dovQEKWLDsvjf$3DFN~z^_C6)pEGQ1J>h>a&*Z{-j)jBUIwE9o z_)X%h!Ekv%uXd5Hj_tIg3w>?ReYB2f&;SnWr7^gCyejt!hsqyvW#xe;YxAP|L{CY) zpiNmdqrk3(lXpAQU>t>9eK7PCXD#s$cdKjiNTlXF#3UYB&_S&ongc88682*N%Uv*~ zCIpdxJdffdwhJ7-d_cvqxP4$c1-=}g#PN_-Lr7odi26M_K^EE1B{bq>)I!RXp8)u; z{>8{Rckv0Z^?)}%b-5VJ?3kBA4~ez@B$M!hbQrKUj>o^K{-bMLQR%ZZY5_K4As_{T z{?hp0&ve6=J-cq)g$iHj1FH&^BEP+_R%wxxduA#nxXUmlZU3e4+LxF;%mGERbTL zU*vigD*cxgdJoO1ao#zcd#@0C8<4ivO1Awldjv4JFf0-*_Aag^1&81i3BY}##h_Ad z;rZPVN}?V2k{phS$;86nk7Bt6dwCPSAZ_^ZlU8qXgL|P~j5+>bRGIQ0`2h(#L_NEE zVqYOrXot~clm8LL z;qu^w2n8&K7t^Z)?pH`wU29R+4w6+Ws98SE$B&m}RPF(vr_nHy;;X0?Z_PQQ?zIed zd-ZGbSdmEuXXgMi#$7x_X<=T7nR|fQv;7GPPrz!W1*CVw7t%DZA976$b<6dkz+Pr; zDTJnqSc4!^)dpaykdM7x!fG>@tGQFF!D_zpK0d#jpRdyIb|Dg8?)zHZZln_z+d9^z zi|*DH*319ZYqxsUsL#0?ay4!^#?J(x09^!K{TSx9CsK{oG?1j&YmF_j?x;s!^IxXU zM+(<}dE35)v380qnet?oI@A((F?`}v*6vP3|JeNl__jF?%`VuSJ*nUkv zcf8yHm1f2j4xZdtECCssmWxYby$F8({0&dfOafs1W}dDx;dA_PM%1o2+T7h z(>HN3DuLsMUHXM9W50sUb_rQ!y4i;t;_?^xQgocslXByUFILknrRB76I}+MaCb^=v zh*N{BfRtPilTUeh8%QydNjtqOypr*&Tz^Fc8)B^{oUiets3@S@$D*?fac?NUw)2HQ zJVr;cVFGZk{FdO1SK?+fgn7Z}feoM*9FG*5qLS;xc(@g>QE66EUZLn?2Bfj`s7 zH8A}Ohp8=j#GrYbwc_jpp)zf0!-qPGIjv7drh~zfdKe-0`!Ty^@HU-2qi4B`?{Led zaRnuSsitP@$?X0~u(5rNe@~y!n5lQ+AUIiqc!9}4G6LQrdUC3S` z&1oL@5x7J&7_!@asn&PjnmR=OA%tw#IpI+)uy@Dh>(yH1)QQYKsUFi~=U+Jl)yN<% z1_jC@u)c1qMJ#*O?ut&fPXQ3$96sc1W4N~A{NdRF82FThb(}z$q|{U)CE*FKkHbh- zU=6|eQ~RGs|Jobv=s(@sMaLQnaC=2RJ|6dS68ZxC>dhv7OgzYqb?nqgFh%QrWao<-SxBQR#b4t_TZ3msg@QaKvCuq%-k_PXKwkp$z=sVL4~#a{@9s#Z>?*4WKv-cw1ydK2M_AKmpan@RyV$Uf znNXBa0=PzCTpa#zomw|QU@jAL>hQE+;&DoVd8KazYs@4fnS0cLP;7@@#rIfi**hf5 z7k~Vwo@6AcfPN`)CZt?!h^mmCd6t=)2En|nlzdlYKBU#}u)Kz0?l>HkH`_2Ckt0JR zU6{PuBwl^^--9y@9Bg3RW9oet18f~y>ofR$`PtyB!wF{)q0bNO%_wBvo7E+TGek;| z3nx>B3i}gpWm<7v0V#N4c%E*43+YwRz-E_Lu<7LDnAzE{rqdbLEQzxlo&B z<$9Y%dV@?t+awCg)kC6!c2^A6QTIL*X)bR_imH-Vmp-%dWy0}YO=x9e$XKk3W-3B7;@xL$7S*@sM5_~yih9?DqJ9XPU89-(b%HV&;TC?=DTwR7C3z~(C}HQ 
z%mrj0Y(=+~c0dyOHEYoH*CkgeP4%KqHiaM+1O%Ge&n_VxNs})<&s7LR#6U=}T5(Jl zCU&YkY^<_4tpADsy9F0E?8h|l9x7jQeMbd`Hd$q)Q*|Yg=06c)lEtnb$F<0T-d7-T zgMM}S2XN<-h)G<{oIbDJS3o;AjM}r;RqbF^uT_iAfaD|^TYdV{>JRidyxvYUQ zaA9#Ei)RA)fxKH%5dT88TFeIN9%9QZgsjHu?aUOD9}^_*hir>l-?YL6j|aXyf4-Vt z5y>P`x07%k!FgxLQUjoBJCKGp{JR7z@h0AG=z=7}s^%aR@px+*Ix%tl4`FhKJI@a6 zXQzW?&FJe;srKxLmWKuP0e><+DfR9&1=@pCh6wE+xzSSg3zo9+fDWwApT4}#_ti!~ z7yG@Ce2PJjzpBSWaRv<&qLy$0ltt91+NiilvA}l1q(GJUV)}o46;iZ zn7;Zlo)hFoG$qwzdhCn@0tUVG>UpF{2M4aeKi+l*ZQjPQhN6Th^)4EN=O*0;h!zKB z;THcACHLmC?$C2VFi_Qi$8O#tvv3oYX;v6`teqGD&EPB zzSa(uqZT>(e2z*v&klUpq;y8_3Ln?jN3`c;k?wfGxfkn28zpDf+Nhm~1iB=KZH#CC zI09R0mR!+DuEih`&*(R<-%GS+>W&Q!QBX-+t#VXCV-`9(d$4<9>p$fi(AQT}ZeO8j z4|orc-J=$~n~;8-JdTR+d~Nl}9?vRcRq0qr_Z*wKU2;AhLD4n}FYIH! zF7PlyDf^>rkml|8yxV+8`c}_IWLo*`u2~O1fCzO#ag`x+TLh8;EGoXIZs>K<6WYY> z&F!rgV|`W8nptJ+9fyV+z*$)dLA2zuXg+fd=rg1h88oWCn;1}ktKB<>! zD-OVwnSoGY?}Mt%boyYli$Q4}Ql+|9iSE*m0i%VS>ofJ%7Hek*Q^g+nhkH}Np2p`$ zPm7+W8Ka#B;qjkXl#ImJ%xPag4y6HrZ94Op%7JwsfbHsZWr*$*If?Hbi!qaAY9g#J~^75+~j z9%S74DP0EDQtk-e18{q8#iY zfqgz!=6w0aC%tJGS=dYp)CgZ(HPJ{)4nu%g+2xpk>RdOJSmpjb-5^}J^kNFQ|!vXin)ys9*}4bR%{N_ZGaa$i#C zNNT`B=XXGLW8!VzXBcwhIZzRl7K52$hsM$#PLVU01B4#`j~bF1%L*k$5JTWSrEWMH zZ^@EW&q){cVm>~Pp*F(~G1_gnXQO#-zP@IuBEK3?;hX7I+Hty7zkCoS5z7`@0WI*7q0;9DOrOP0X8*{RoEZn=(iehlew{2&u7{-ixugsB^2m9MSPEIf3~kx*&D& z3)-;Jx$fan7*7u&C2elu@)DMPm_T8jMe6Vh6_TMt!hiI2v|#&pfTV&c&#B%j_A|uo z8_zS`b!JpG*e(`|78X%=>CHxHbM;})a+E+d)V02CBmUB}2wSH_Avy=QK==GM1mJ-0 zVNa2Xld^%O3;1TS31rkgY~J?y(g?4r;r!o#!4y*D0mx08_htdEChfQ?q%O6gbSLC0*7P6JHIsxI9AfN z+uA~}cc}sdRvpEmeo^1SMv5j5Hj`BW%Jgv0G;YaVQM6p-Y9CE*M?aSRsoqR*rTFvs z7z0tZxzQ3spg0jj2z0w{CMvRS^jU-q$UGJz6f3++A|tG4bMr6vhty%HvMr2U971=I z=Bj8bq8QMDGu3%JAWR&il^e~mgB)|kUJ*x9PC4NVe(01kzZC6n)I$d$ojlf~;2_mD zL?^k+e5+)Fs1tCSELy}Bi_N;%6)g0lR{qBzu&2k^CZH(J-lp54(*ZXGr8YPqgwhjfZN3u54z#ofv>*yTAf5+J2IlE zb*e^x?;yC-nH|~!32*rPS}Fcd76}arL(mCGs04vCk;>*}t0XlSQxzv_BbbTGgeS%BSQ!b^ zSra-=7f(rkY5>n(c85%j(UcPK>k_c7E^B2)b%qGvC+M2PqcDzmg5mmtH^tILe;s_VD zio-?~NA-FV>KQt5hENm1qyGCGv_d}4rUe%+O*)#5TMdiIadbW39X#Xo+=J# z>#mBClkeSO?14aC#s6qJ>v+1~|Nm>pbmusZ8sn{vX^xJmZHAfV=$h_4Iwy`clQT!p zm~M`m?wFb$)A4)x+`hlRxZV8edR^Bu9*_G2G&Hu7+wAoq6cvK({1mvnY#E;?VHKYz zX62!1!sEczdyxKiGMKLl(7wo`%#M$jp4#RqgKg2*z^)gij1&QQZsT zjFB!w1d%)<3$WZMaFYvkDQeqnhZ^$!7bVY7>FCuW`;XY;kg2VyWd0PtO&p(sE(O5u z@i-5cTJgKPyp3`PznXr!)~-n=t~#kdD#}RG$EsR*QP)sBdgjrj+7o1NLXFXfm;rg= zg!SFNZEt^-fri@IlM&2n3?5J+|AF5cHjG9=O&lLt<`7Ei zw`>D{hZ`AL8H=u;>J}*Y#_qfm=a7Rg2^S&Ba2cObBAho&IN=H~quB2mNJ*gl6wh_L z^Sv(*QV=$3~z*cLopIphDojI|^gnfNgnl@hQ@U(1ENmx4momqi*x*{`zG-xI7 zC|5at*iK2o*X(>+rp|#if3Ei~(LM=KvOI$lcFJP0=cvSGJxu)S=Y|9gu&j5G;s4eU z-qruW*v+i#tHKu3YTlfIVq#Kfv{zl2brp4Vq2crFgL;2AWjqiIE4^>EPKS+`htK&2 z)H2i-sHfSZ8D({R_SpEdHGhZS$^IE+e~=`@g%ct|U@{++VR-C6(HVu&^F(}{g6Chw zu5jzm`ADRyYp?Oco!lSAl;Ys{Fkd-DtAALLBexi%)IL!$1ID@=89Ja~N%1`#i5abs zmGycS$6$}jr3R0LBa3)S$`nMMhm(oDv16Rs9a_~cT>hRNwf$hjSL_}Tikz-WoNzrN zZ>F%|2Bs`2L}*i{KwDD?C=808kXSux5ZtnRScy*0?bP&qAt6Sdy?el#P_KDKX6JlY z>0Wtl-t}V=_aR`%rAeny@FRx@qHys33rS%ba5hAhy|-26ql0|ZeyiL3+EVZcdZ`Q_ zvNFg*rYtJNo^2 z2NE*2JaIKw2+8H3#pH{jf?m3DUz|X&QUmn&X-MAPt3yo-6uA-;cTn8`v1B zB9JV*=I3t#dA}V=g?sv3SSfgEAzIy5CT8#a^Iq}CtZA$D{c%~-JLlCa&4!)FH=$6m zl~L5^%Y-HpOM;0(uPca|^X~v~G6o^e=5U z^EXvaBIfL|0$P7XYc(v{U2GIXik~b1%Jo=mPUyR@+^gN+jI#KO0d3!g$ z*DR1ort{_B0O1?4l=#MYK^c=|#lcuf#Z=5MVoP`ZH!RT*GIVtKd{Ck+t8NzGMwI^u zMzF{39OqQ-hamgHYWdG|Zumlsh>J3gkoUrf*96)G>7d9m2Zc-6PO>BO_9k{#)qkl2 zX-|nzMBkO*HtT_?>VNd#m0(%l{?j@LKo4W0l$bWkE}JJO6&>CWv1@>0)l2X1Bay;=O}Cd zH@?q61cA+I77V?$0j8vu+Z{E$vlSq0qgJo}r6TC|vh+ 
z{N&Cz?vNF^7o-d>r_aM(rHA=*>wlgVhO^N{j2==?^gYp&d-?qa7IHy%FWQJw&l5u# zwFY!BG&dR^$e)SrMOeiAuL87F@8r&{$L$I^dvbHE+ksJ_GQoTucf_TQ|6BAO_PYsA zFOz}bNEw^udTr$4H|!bkdrU~k%5Q@^=cmb}8jn`8;=uA7=S$L^A6A_vcir8Nx?~uO@h$2SFb`rY}*5YA$4a z^oMp2`8zsR?=P~txXva`?>i#t)h$R&gAt*795Na6$)5vy%OY>sFyw04AX~YN>=x9_ zQwYfI(E<<8m@q$nQ*5Q0^o6lgq$I@4l}o}#Tp_n1fwq(E_3IJ<|?tA~aT-dmJ@ zO8YH*0L=Y11nMdJP?LQzv)>f4kx}q(RkL|)(6N_3j3zlce@-aBUUrtpw2%J38~L*_ zd?E5ndYv2N+8b;)0bR=%PuBPWUWf^93cn68sCM2@SYG%7-01s6t;b0?aBK^9jB=p5 z*AED&v1edKkZ@a4(z0OC=`2j~OEQAba(eZFFk~!oQh)Oo=ad44E*JA7W|!2zO4T%U zf7rT^%Ko0c)m>A2FQ2-x;jfvr8-kN18k@Oo^qZ8_Y0>FY|I&baWKP-am<()(@#!G~B#|s(9(5&^I?p3APz!ymHKo z2H8I|nE}JeLJ|>15Dxwkrn&4pbu~HrwjV}V%*;=~*ajYIN!Q;~zcTB{iGRQ(-wWBK zWtWs(GV=8??tg+q=hTEy2visDQKh8ZBK@*9bMyg0fAKAI#R*DPQT@9oq3zZrI{k`A zKSv&u8>XqtPqV01>3_BMuKOBwf}x2KH(*q2*sz#G-j6k%|6XG}c(tBzTz(9m=OB#_ zrG<&&hmH5tvTl8}#sEv4xNV0CoFo~@g@b^{{ZV=)rw4otSXj>KOGUjuZn9fKmiUyP z0UXfmp_OT;&F2my63wEBrB~pyEQQ4<)+e9m$VmbDa@u=!RGLluTG-derB|WOiU`=B77V5Mn`pS_x}Zz7$5~ zG&iEY@W}}q^W_UU<8bN7KBCt4?CydzdQ8v{8NtFxJLl7@9+`)dOd#z6!$oCQ$3q@0 z3Ah1&<4-%6K|bG%FE4EJhdGMi zGh4mE;#ro~TO$>A<(S!UncC1}ZPeBau+C6z{u~+QBuC%7$8C7)LJILkSp&55RuCo- z$#-uHc!cw)T@7llNcA9V%T{jkwSoT)O9~nRa*rY#n0X_$ATiNR5h@Wk+Y9;D!!!zG zYTxk;F`>9szshetl2sj;L0erIN!wWVp?g&R+Q@Ohtu16nzSVHc0rGY!ARYVkrTT5W%P?+Evok*wt{k!z!jZpb6n9c4Epr@QcA8DP#ydir5&kJ@*PFRGQFH ze)>gc=Blsnx3RS#^mG7S8|y2s{_m2y3Oj&TW+ULCnGCY0y5&Ar!L;6(sNmSfZUu8X z)hSgSVI?fYsY2N+wWRilL0HeV2KbfnCORsq!X(q{2tmmXZ_|dXubTQ3fb{N~4m*WT z06o$ztyh%%9nE5$6&LhJ?q~wc{qYy4Zwginx_u`5*n~ zwmu>yWML3-6Oo&Q9f$M%C5Ip9I}8w#F3e?ruVxaF3Br*`Az7sm zzVdM2lF~cw**Q}VAhX5$589K`pM-7%DL+u}>g<3RZybkg$V9(AsK@^kFm z6OI(s4qC>J3vaf%AP4LaoSe#&8vs@DN#|Q;SzyzJ4!NrnOaN9Fn4?UiwN`2tNISfw zvbkG0kY0wj_H9lIcWl=*w(y#0-^~6YDPjTg!{p%)meu=X z?t+$zKQbkRm(&+Bs6-Ix;{#-yfr2ad#~R9mJ=v1xVP5S_**`u4SOUFoh9kq*{(22| z`m0CPv?q(Skqt(8RSKl9NF)_P;Sg+v(;gm@?LgJPYlc&4+56YHFm zK@*~vw+&U)4RR_qn_mIofZSq6P+epXA=N!!C;`Eq0Or1l5JO8-Pple~gF&5*X+(m? zC$}g>ki_xydQQD}c9A+s)A7HV4_RWgwWyP3-*9Y&%N}6-7)io8=;_LcNye;GWCZqW zx_4i#S~fo8>P(`f+Ry#z^B}R1kH<+0pO-=YJ>}~S8>~+M}Q_=X)P@6P!b=595got79;5zz zo&d%gV6fp>&$>?hHwP32@yncX{V;~b+~BB0f3hxCQyDoAMc^oKqGS9%`w~|2 z5BK0?OOh^Qv!r;imnw-fk)^0?Ug`>OLd#3~P?&1nk!cb57Xxe4PjM>7@it#I(TZTbwa_Dl;F!MgsS+3LWIp8Hlh2+`wc9tL$|8hJdJ4 zO~fR$FMCw(Pk7}7?!Fb552d}|d`%ABY`1zt_Tw}QPH$?qW_$9ZLw;1gcy1Bg7=wG) z8IE7<-{)ba`v&|LUz-~8^x61;9pf}WKp2pz-MdzGJJ=OF-JcV5+mw|Njpzu1`@U?R4enNhTul7LM_jLJ&c z35RovJ&gR8kq_R%ykb0+{V7g&>3es&vhsHcc7BUBxXLz2O?XJsgC9m96+kaV7EYq? 
zk=y#PyHLtW<4E#H&HP1>#1xw~N2#W(Ec$vv?}}dS4WR^a*gK$q#cA$(<_Y}l`uxJv zkL6e;EGw_PKtEARZs+(`kmYqVQ}v%tJzBeIqicyTE;BEWLT3fL1x~++Ncr+Rny;9Y zS@-(IYb=Vi>wVRoSD%v!wxx_3?+niO zWnRiaB;Q$ARZk$*%Gh;K?M18z+u)rQ4r+(aPdX@2iDd%CI7FN{qQvqK^`39A_}%+%q3NodQ~x5$do-N~yEm@IWpL;w1+ZP=;k^}6K40BaaSA)|s^`!M@PWdHh<|zA9kB zYW26+R3P|KvSBHL*JDm*4&i!RnTlLqK1J_LCb+%L;gyK5WF=q0Bgby?ormt>XjAUI z4pXD-f5p(+jJiEgXIDDh4Kc{Wr0x5)PKVy%nV?$=d;<(=IUJp?Hz<4FYYHG!;_xFK zf@{Z*4}B;eim^Ewlk!Ib|FQi^6;{l}my;3pEi{{JigL}f$fhlrCnwxrm-`0rPcYdo zm+v=TNn(T4JY0pdK6KPx{S~yyk-4oQ*KF$jm~K_JQA;1Y8M`ZefBkbeV_Jw66@P^z zXZE{1mwEd)6?B1rhMcf)j>k)Sf*(u=4=7zJ__52^5~UXg$dQcI-M}mV1g&Fd>;rHE zXNbwr5kI@dVKl?o(2&*9`adbj4k^$4RS>%ijR{Q`w{QAg2+3!NlFsmj+V=Us&A@E% zb(@TvOe}>UTw`(0h{dBQZ5tCsLZOqk`s z%w7&Nyk8X-iz0C#rn6&e4U6@_tDJ$0f@bm$1%2@I5;5C5!4 ziQB`{C&q$kJV4$JjjEbn7^ay_Sn=l==l*w-wxO_=;9mrs&UaP&tW$99%He8Vhp|F_jw()xV%FPtmcnL5vca{SHe#wy|`J#aqL0Gw@wk$L) zuUuG*D)X5X&}*$`e&&&s)~@FeAo)+RNWb=qQg6ZZ9^#`}yG;o))*;FyjAtM{8~H9} zqN$XEGq9@Z;2fZcb(Ov@ovCf-qY(EJPHC?*25u7;2V-EMa0fL{4V2;MQ>++?6om0{ zfLK;4{YK(NT6Wgzuq-dMQK9yR_UJxg2@XOyFKBAaJxUUZ+<0Wtbj9dNC9|b|FJL8P`&ft%by=CpwKmXJQjuzbf zD+!1u_v&8~7b$D?rrb-k6hUKiq;zn^amKs)n&ET4jWWt~h zJRk_*H4EK;fs;Nae6bg3U(uD~NrXVeRO0|fqoEu-)}!j=Km#E90jZF-GUV@1wz|}q zNx|F!R|h2-?5y}6+?Ux1ElLC8RMz*tE>U3o+LzgP_$JSU>yF1Fb#UoV)Q|8*>$ox?XtdWiiR# zS23M#(WX4htC%ArU2KkB1CU*^lcfLFfbuz%KfR|MD0`HJL&B!)y*q$)oaLe4?bf%% z^7+{;a((oErht3R?nq_`*Cc1oXNnf(Aigh81Cej*4gY@D%;K_|?g-jpy{7neK}eZJ zX>|yMGMbf*Y;OOuyvRHP&H^d#vLxjar0Su_^aOEbb(|<`^$>^ZFxRUB4}R*$G3I^W zHdZ90$=^KcL~wp$HgT;-T4HgEw|3&xdh?PM)v4Im`(q8-BTPrig^(-(BWUsvb%Ir+ z4lY~cYY9o9szOD1w2Cg1p53{+u%OH9tg6re>D{>^MI_!I$q$%$353cb0kag7*Iwx8 zvf=H!OyL6cQtf|&Sq%??zUmT#j#=)RyB%>7PPw&&_tn@L)7Z)+?5r%C@Hgg_zC@}B zcE}(ll*4`hm!doKqq0m>S|arTVeCu@k9?_7fxnz;}N8Uby6CTf6VCXA8hx zfn&QuKJB+3aKHa`i^G|r2^UaiVDQU=L{MpPFqA$Rn9J^f*9qBtfv6T>pYi^X1(im> z=&!Hu^lNsTFv25gZv=0^vk#{AW+_5)gf{i!I7kXeNQjlPCTIZ*C1 zU1Nncok*EegZ-SDqhxdP0phny2RnprLy9PL;3KUOyii7My)juKq>#6Tnv>dGddqSX z3*N;KLn0MLe=$;H-xKpr$qHA2Y&34oze$N#%$sAU%yoa1hpN0A+H1a_5zG8uns1Vz zE%{kHa-Qsm(xc$_5brq|$(zE07+F?qaj)Bh`T@~4*;4LiXTa{Qdk}zKki~s1L@xu& z&S7iDp8|XaQPE%wGZV!U{2&pxA%SaIEj$-8(dJNf8&d2@)s9p z&QrPl?RXZCBRo)@`ZsaSAHe38BsyUy4?v%M6_tJmatF?i79yizz0+cf@%i}@(eLYx zw z-70qIn@(0m&kS->Os$BsSp8v-CK=%W;jDde2JO=6(q)SNHG!j|_HM{THYk<+L{&kk zANQL|(+Fo)ghE`jOf5PS*#RGlk+8^jo#luBp-m84P-^D|*i~i^6HNP(z=jjyM4nEZ zVq<3~j~TB=AOdE_+d9m6mNct3xHr}o{?1o`<+lNq2x}OpsUaTo0N#MJYBf9F+ROU>@?sNwYX}t zNcW%B1DG+qL~p3%S^AKMBh(IUBThfgc#V? 
zAF^Uk>4C6mMeKqXNqHt0B1TaH6{j+v2q$=Y6tmcXWC%Dbt#-;F;F6hj@y{IQ zN1%wk?hmWOcz_V-P#}&=6Tw4QO7{2iK=NH3=^IT%$2a=tmIOBg2l1 zr%CUE?n5dSJNO&{^Yg=x5>7YULZ#;I^m_aLo{&%H254C3jdbtu*W_EnjKdoGUDeA+%lJ z!!?=~z-?Z-q-g;KZp(`@+S7B~R_mauNH80G>w>gWP5y=LUOk&F6Csq0RH6E&7h!)d zoY89Pt`7++edFf@3IZm6<^E`P=sG;gZAiJ)BJ*120fVp^5e(nCiPN&2uY6;b!OGBk5e~+E!d)hO_#fqlOCU1g1iQZN!8^~vRP!VLgkZ)q%_S+; ze_qV!`e(X`6z6C6jfH%CDdqw?q3de<-OW|hL`6EYlTnrFY5gCUu(0$$`o(j;t#?OK z@rs$^d%W@p82RSDhfM%PuE-g{S8e>!VmuKI@KMftXhr*#rpjADYKx&RmPAi*3-51o zP!|Hd_TgaAzksj>;60&Lz*u=|83qPxipJlcFc$n_)#h4p31r zx0v~Z7H9aIFQ#Gx50*1(`%ItuZ`7UA4~%5uoamZ;0WGusGt$hCZ_oe*`C};|xn|%) zu3qn?xXQWxu56entSZ%D(rQfbA(`!VakO(p(P;!T#ZEY z%sgO@uhUQ=t$N(ROu~?_eM*y3jts4}ODE)*Ar%NGlv$hX-h6U-6^r+*Z|pwU(m9PW`#(~WMB-VhgDhLF*Oew#s9e~+PuBZ85i(GT ztCC>io&t7UoHaGh&DWu9-c2#JwC7e^2M7uYMm*$j*e5|SiB9pqlMsfbixIR^!kX%k zfvAq>6RecP?d|eJVJu=9K^g8~=Ec6H6nb*16!Hh(sOaaTHLT?!?_G5C?(dI6=Z-`R z=DV|*f`Y9`Tb4E!ubx6(onMhPRLr?>M>&G8gJ(oD>J8tB09b-bPTl(NoH~E4NsGEk z&vJ+AVQ?;Q2Qlp51%5=KCEhFPQAnwt7!3 zMG2Ly7En;%82N}QuNAEMw^HMjQ}&@P7tq(}9%p!_*j(&tu(PrCq&|CX1Yy9?uAI{q zM7uYtk0`L{N=0=6^siJSUjo7?XHq(<*lHmWO5tYgQ{y5+rEdCn`?X2^cFMudS&`7^ zr%%;{!cJQam|_U6sg#Ane*TArTSl{NgWLT)kmuaZ`9Nk8%j_IJ-+a>~%O+Nim0alf z{9FNp-hF_o4RBGlBF{(L9x@`U9EZTqVVD3Rj++Gw{fkI55N${-!u2DJuhsiuKHJ-i zbwO-gMo1n&0m|vC<5aY6`~dBCn1mByRr!jA5_T&I70}M=%0-unsiy`>#``$dxy@~& z&&$x`0j#R@iF7~DuvD1;I<`1JGV#}bkn>NgrM;{I+JY~6L4r zC%b0j{HLO+0rWt!j`ebKmN$B#g~S51fW+*p*l)2OpKbAcQnn(1Y~C5=tq7>WLYEEz zEtZx;RJ7WwS5cv*z!8Cet8Oj^OyKZad>}7Sq%9QNQ%Hj}3<`T;Y?O*!CSqkkLMa=X zF}V-;FbNYU-nf$`Bc6c@nXggyRU^3G=yY;IRG$DowahWXfV=e916IbTe46c^HoUQz3gIhkXb~`-;m_y(@xHeN<|R z<%w$Y#2U}&vaj5Y$u)KW^NiwH_GpxT@r@sEsi>lPSCDt7T}gKFo4FCfX@Y4d+_GQ3 zS-`wjEjnzPX_bZ4A!L+Rg}#$;nx*J9vKZMn+ixMCBIGztwp@w_1XIKR@mTA`SAW(d~#8bXlTt$>!tn= zbqNxJ7R?pWNg)85Ug;;R4=%#=U-E!(>GwM}(e;bFW6~FV**CIDahH(!L!6=+(5fUG z9Q86gkt2Hh_fg>ynr&mbeXqkGYbz#3QdS5AeLJHcaLQ4GWq{;C<}_X@3AfxN%W^ly z=iehiCsXzT;gq%J7mBqUFTv?>cc2(~(rcZ|1}IQO|2emvO0K+i2tdQvRrW`1d46q^ zP|98FQDp-|9`sG}=j7>1L^N%|(cFk)IZav4;p>BnOaLsK;X;3d6K@?S2&?dk=BsWD zbd=51p(|qtP7BgPLjrQ~7xR{|2^i7(a!tuSD?9ubhjy=5Y4y6u>>a?XAMRK150d~A zo31&k`-jV{dgGIrNlOwjPDM2MYpqa1J!%xhq)$9!BC8}{%-p)zGXbrwAT+8R_{3%09D;I>WTHA-O_r>e#zDZQBZDovusqkqj z{`)o!LetgCc5QAS;@XBd;iVs*!ZU|qZ)MmILVLI23OwQ^LAwDr7xc9!t`W8uf{s#c ztRK6sGX0h{VHz3_@1Fkt_n7!TQ_a&|23@+{b7~cMxCnf(4&b~`xyloK_gApfoP_+) z&Hw;o>E8dx9VF~4ft>QD0$-S1%^l>W#29%zA5*MLw7Pf>-wMje+SQfSp!yrY1;d+V zW_d8KW#dv#3Rwh>UhouP*bC^p;^>2hjkGFSkagzaWah`kjH9jjJbd&kSJD7zqFd-7 z_?(hrKYtf_OJ!)#5r;(eB=8ak>FrMSi8 zaSOB8^S=9hMp(ABT_~;kyKDV(ibZe&6;=SP^s8kd^>W?1{Uv1`HZbi3)rGLzd3;jd z_5s3XLNP&z`2NRpm`ftA!Ik^g2ay&Co~c3N6FCaSZBrf;upLO-!37alItlS@O@Pgg z$7hkTZ)BZLT4BY2ReQZp9&6R}f_~+7Aq2URUQ7G^_ITKS+rY@nRR>PwC(lC{(r-o2 zBWbl}qT4d;1c`q2Lo_SrE6a`&;>GSVNH-Y+gQVgtnOW~NH1)VR3d}ups-F6NsuVmW zKDWN|SlG;)h>>2nN_$a(gh>yA&e5LyHpp`ZV+7;+UnOMK5B*CbM9kjIu}|k>xA!W>xTNriX_D zig}u$TPkm|=OpF|3v2BM8Umx$W2=lqYC?>#Fd=0#K)0TM6M7iRMhj*Jvw+L9fZaI| z`3)<7QAT2rE0};>!TjMCPen$InWDe?$nPMArb>VA!qT$Pw6vqVNZYi%cM_=Bp)7Vjt6Cz>eXgLU(0Ld zlgmm^2z6GH!lkSjBD2j<=ql;?DIBmyPMaUIBY*g&nvGu{-^gB8_K1 zNbbnCNG~~2ygR048`IDuYD~cMdmbS=R-y(pOh)DeVM9QNi4<|!rcf{5|LWp^IL(SzI3~AFR;4-IFeZ7K@IA+Bh_t2`Z-CTJs(NZyM))Zi)NN;Jb-7@( zek-7`Bf?S880ctm3p4A4sjbV;p$gL*(*Efp0^)A)SZwYsAl?wUv#-CB@+_&{@ACe` z-@BEVfK-uln8~eTx-2zP*?x2s9zE@6sJZ$}=scPqjEr>>AqtcK%=6NSQ|vBUYBQjZEF26v!^MfppaltElfo*zR0K zNoEVUMT-?d0B6;Kyag(`0GLXW7a9)@pU?m1ae`2^bT!mJDYF6|XaPbljnvKIjyg-t)1B4PTphEB$^nJ>@N5``mvrm?Yo zmBO~eyawwJyzDv0$30B47|MGrc#nIDUJ%og2c!JOA1=KbBKhb{IG7piIdiMpu3jEr zv*6dHH?(hgEu^qDWUs{z~_0C^%&n80^H0H||!hoTQJFl`9Y76)44^a&&LqOty 
z4#PtSE*RQ<$E~;C3!~v2PDfeh7akQNZDs>=CtGV-GG3A}8W6voc`kL4%isa;tS@Js+UJl&XS?*;V+m^GvRRR`xbiTi{B5YBS#5RnB#Pahp*@>OhS$yUQs<5{`X&!JFZSe zAm?=oRNjV^^eZ748%$9;*+=s`KB9eQ`$v({YM--Y#X(Tq1%8pS-w*b2Vn-CbzGQ2F zowE$Wkf10hGstFm|2SX?e=+;PqZQ_n#Dou~81qDOZ?&xavg8XwL*fS-wx+a(mYfMy z`i2vG{>69=!!eR;{}~3t5ZqDa%g53m=aTc`%Ws;~lA*DKChG9dLz6(N*m}`2Qz#hv8bNmPb_l zy5f!1SVpVPw_0F9EkJA5n6f_cDF(H7!OMm<<==`Oj}fOO;Q&d%ado_MBt7}HAAm!| zvpH|(Vo|9ZMXqKWQF$U|W6U9VDq~|27ETx*`$n6EZkDrRp`moN1N}o)h&@W z+LQLiDp-8=ZY|(I>d2t*{puUv8nFw=o6T5`eKcQ(0Nm!|fakcoIDySGOt&1|$~K}` z-~V@llz*fGFVlDHTxv4rIkC(#xk55Ul;eLGq1?_k;#ItF`^51JXTXlk3BKyKY?PdE z19^$rg>^d+!UF`%a_3Y_lUg1KW~L zG;90lZMczFoev1fHWOX?V+WzUzL;MLL@I$il4*nto~&im(CjyrA36xKmlZ@(T0B5e z|N5viXPk@&ZvWIrn)E&S)hh4nc08!L#K?iXiWoJ$GcQamOYJ%BQp}Y67Tk|TbxA__ zd5bU=r5Inm%E3b?5E1}=o=+c)%2oS=V|HdEGHVXCl$ZNn&s?XW4y+unmXathZ=f3b zIvDxdxZ1(J44s!>2^VLayA<15|96g(?8%ltu6IcNLnpx1S8%5SRhh}GnQj$Cz)jF( z!u}War4yu8LWF6c_`0k9`{h|G->)!#I}bJPd^XwW@XAxWe{3d32jFhB?x1s&=L(hz z{zd(wi7Rtt-oRV>)D1kjsR4O5429ZAOmZ3@VyL*xvpV`&y`0;O$zF}8mc6fNtceyE zA${RkeTw0;}hbRYaL)#|sg_EYQW#=y{)*o%lDFj;3ngMRx@ zwNidkbhyphD(^5I&joI?s`2*9k%_kK!bkIPfY%3KJ>HOGqK4|IpGPq64N8ucrXjWrLn>`<~7N z0nX^IpA~DPlMneXc9hFfSx<5Ovt8)?;Q(ceN|^P8qVDtSy)Zap#0uxMyn(hmLi?mfL1QRh zgRSu6Rj)!56(SZZpzS6;ujHy9ZvP9cDkN@_)Mj#FQR_TfA_Niw!$nML6DKnC24xZo zG=Kepn9%x2btdH*V}iK0(5JDhZFJCH%$dwE7Dc6Vzg3^$Yq|T0gbfX(1p;el)+}U9 zSd%Sr0X_F0o;~=ih3dkOV#`Jf@AiRz5JF6Tx`ja00D+)?*G__J?OfG1ws)`}QFi#y z=t!wQH{2CGkFL`p8C39G_N-@CK|Z7ptPDs_$vmbQPa#^W58F0 zQOHcf03i(JqA|Wf^}W@&i9bM{#eJ5KmzU79(;rv=Al{p){*a`AXBsS=r6)4i6ITiS-1#2XFs|xqu-6$hW-^zZ94IW+Nh#Dm$*zrrZ+)BzP!-(6auTg!hn8WCg0e z3I*em_o>%zVnHR&@|I+|T)m#Ykzz?AU4GhNqaPskzMrj^h~jq{+pEdM2DD@G*^ehT zW@LRy$}dm=SuhUWOuv>*K);*xXUW`cSVf>E;1w_27+Z1}xxCfsM^m-2+n`AzfjXST z`a1l2Q2tb%1Oin#0NUeXze*j`dSB2ChrLm)LYL6C|5BMt>^MkEPCfPOWtQG62w9f~ zdmsHJw5~;Q8E`*cT>9%Jj_;pzCN(eQi0!T9k9@+p1rO>IWiJb75i-GVqO9rTsH>I; z`y=1{qz|XOZ7|Ut$Vm(SpiD7ny8UyXzhYtPhg0-UK&5nbdr@&Q;$+wSGa0>DM-KJd z(4_V?6FkY*$fXxE<^GGaKCmczf@F3Ltg}SePbj^ki4QSp=?7pS|CLy|f{`B=)v%vL zNYy~Wew1?s)+&H7=vT!W^F31zTYVt^B5C8AkTJ~y4U_i*t`|V{l4L)judIcm)y(X?)gNb0^e}B#S zFghV`4sem%TQ6z%SMKiKu$Sl(pU21-e)*y#G~-O^v9!pU!QU6ALi zqRk3G;K$K_Fs8OoSHf9>ac=meWbs&0IN5v*?1kRS^5aw(0RV@uUSkSX@vpcLj)&DF^=CFWg2Nvyjc7S&GxyGb$Rxc8@bZYl)sT ze5Cv`$I4TFVy75;VuxlZN)cJ+?r7h_Foyl|$~jQP(6K#Z*+EWC5Xh?;4;;JhriX%% zXe_tJz=L#Y%x&)puy5y2pFp^e55S*=R(dh=@U)#o?1aQ&NrmZKxBg3?1Q**QA%B>S zKH|T)R{!(N!06BhXYePv1Nwl|=VvX@-p;NR^{!MLcEP=(%j$D4NAx+h(Cl$P%NNQ? 
z0e`oG&$erA0XNz6y1fM>5XhHcCHrqC^qC9}m@bNs6-ImLCM<&?tBPC5-1)ZqG$hse zYy&JI)J$*9#Pn%}XYOw!u#>jH7s1)?^6Vs*BA}zp!JRFBk38o16O0}uOIHL55@6)QH*%I;y4Ji&FBpFCA-Md;!{B=X1w4_l6|Dcl6bcs zz@1eFT?o5AajhWb`=UpAaUr(dI!1*PfCW>ns~mYiEb*>-vweLFTr%Kw<4e8BpdEp1 zZ-8gRmv+N^XS3UcS9sJut3Ke^9bA7hh_6ibcj5CF2K6OLs1D0m2)Hm-8Jo*&@2mpT^$fudsL-Sd?wvBu6*Va}Ydm)U@UrG0#z@@9))G_PQc%ckD|JRLFT) zU^{i2q1G&D&fWRb?qXA|%2RPZnoVT)?ogQ82uqbiG|t^Yy)cpwY{~Y^MQf4#u72B^ z&1h*<^DV1ZVQd@AE-51CY0ZD;h^$9B(!_tDsgLA@Pt~0LEsf4E z+g9=c$!GpT1K1-uAJRrX>Y|p0vUqRP&)0jBe8t0!0FMd=mE3-=ZZO7)`jIFl;ePr0 zgAM+N-m`D!hW%cLY#wE1Se&w~VK)phzPwdxtt~*IbLmJs`c@%|HHsQe6h3Ag2#dEJ z%J1g^$Ay%Veh%{jJA}C2UOoeA+IPNHmgpTf>r1-K zPtBtuz{$Q7(QBli>#i8@1s?`ig#Z7w0t?oQCI1r&>nLPe4-Uo>=zI<##}7ku&NjPm z>;sIekdP$DN*tVLg^VOQby%3D25q0XA)GWViUU-xo$nEX3}cyzsrc3*UAtq~`t4l0 z5M6qtId-ggC<{@xRRZpB$?Ud;eYRi27t;RJkY_4$DC3QVEG}WLtO!E-jNKs)#T=Re zfBU&+?a6@PrKG^&q~>4&gZ;2JXDT~5hfhvWCk8u5=YtSE7=h*@03DnsaAd`>5*wK5 zxhmYOH-l%{x64{xjLehh+Re4KicZ2SdUT=%8A#at|E(!BY)D8adP&$lZVUgTBYO(4 z$y0l(KDe#&IJa{K1G2fNsXM37(YtTg-~R260}{vZ=Dq@P$(h8&(cu5I1#qR3^P75> z-p;VuvSsnuWz*ImW8k>H0|q+ta8Z0EnSAWdk-xi2YidF_==Lv@4^!^ss%M9aY^{qnh$TQfLFRu z3uvY7td3|M*q!LulZZEF2t-b)Muj&x;)Sz#|D$S)g%ebZwK7^@y)@peNaI&a=Me^M zL!dOn1ni3sf|1grTMq`SA23l?vR59EOqdNJ%pVO46E^AiVL4i^rT|{*mt;0_RPSCo z=iyyuZ!2Q+T8(SA=`*t^Q()l10D(735fbOio@N7gZZFYj7=2WA*I3$dw+y(MhfJdUdG`u8N3lDX3-?1j=*2f|J(ZNl4(OrK^9mYFwK zrEYIDU;A7A4e`$NH1Rv8NdXt80!a%IXH!97QdTS|;q}w~2HSWUQ#4MFBq#?nE!>{w z|M7GdUQu^nxThI<0BMDxrKC$blm-cv?if-^LVD=#mPS%?q`O=0=lMK<8qJ-rPfN+^wYsctLU|3%mu7_8CCmD$q^0%(FH>5IpQAxiO74Q$ zg0ti8PtxJ94u2*yf~!`Sa5%`tI&F zwKq0tomKYzc3qa@zI@LJuuP(RGKPLsnJh3cIWLn<^o?ongZ*xIrD21;ow$rv>DPsR zuerd~X(TlEyIp_!adm9v{dg{O`GmJBNAw4vc8A~)Z2Y%A2!vO$prv>tKcw*rx9b=Yavl#pBHD$idUngCm&oU`$BX(6A{jAiqcd3W!eXch%V;rm?jLttxvG}pR;*_UKD=8vq6copBcsm1? z&Ea&<0YbUom_)1CoQvcH5bud<^n<8WBZi~qTkDgyrU!$ZYq1gor;Bq+cMV4JKzL;< zgWf^vH=n!SKU7PKCD!)M0LnX;dR>H3Xa{uOj&#{xHc~?SiL~90L3!(+g41WAG&~%k z*_bdAsbpuYDplpkw*kyJLxE7qdg|-hj2_HY0FOmT6N8g);hZ(c{FQ!r?t27pvm2ny zt_Cqo5w5?pRgHxj&SIZ`1o{mKNFhHJkjO#MBDnuMRDiJM*n4M*nYYs?j41Lb#E+|p z)YAy(?aI3y!ZKm*es`}gf0@HUat`S;H|oym+}BG|V;Enaru#?4#alGwqkBXbX=VHIR+eqkAr`=?jou=?B%Q7gcXd&VR#1TW;`piz zM@^^85swK&5cmpcKG3dtIO*xoaI=~CN^9gjt)0loqy2ic-Bc-F!U+TXJya`BMH(cZ*?P z@&{t-QEj$eH(xH*jK=RA4|q>TE_!;Fy%l7pVM+gZ1K7Sz6ld;F7Z$odKKjkuTV~3w z##rr8J-*sT_{esAtEy{RoBGg4I$5v&hMK0apeAV`dVoKUl>RmUErA;WV7M3 z^u|go4cRh0S2Dvc!>b&}dn$AREJWzU_b2j1#4eMCmuE>kdl{={_(QwP_U?AIt>?ax z!!7oQ5zQ7?TgA7ONgK89*k}WyvCm`Adn|0o zS(xxo`u***b%ofz*kaJUNpmiu+St`um#x^*0TxC0{FHK7y?BMO&%du7q<9Q}`V8E- z1G;XYWFnV7xUkfq^L^n^%~3g{3kt!+0veNFRVDQ-(SP}XYmpDwKoBe*Y&w~NVQvtd z{Lj(7aqK%SQA!Pl$uri-35Nl4Wom^msJ14<7vvm|DbyR>4Lk60JI&AA$)of3{}Mhw zA10?aVC@JHE;X+?be;j8D`p#!dpFmUWv)vSixhyMWOgyG#%*}oQd7=o=M(h;;S=D*i8ECDj{v%+k83wQo=GRpX1pxno!RsI&On_u?GoRjv z@1-w`-XH7`9xOWz;yVg?U#{EKe7CkBs4rfhbKSdYIy%B@MTOWRQZJB21;n4UwM3>26w#1y!_8VyF$1SJZHF$&rTj6%Zw}YmHaO(0)$;*dpR9g{kCwXah4cl^01gXRab%wTlKBJm~_R{&=FWmP}@f$ z$bG8p#s5G#Kv=kLXnFG!_Wt~B?c(gogoD7~jKg27eu^H4OUf)@)Njd~?U(8fx!DT? 
z0{jNHRNkYT1l59=K;l3D)jZ}=T#n`s7+amBt^+`}4%}Y`s5~9x9&PCsqSLEqFdj~M zI;&c@&8s~B3s;LwiBsxA`V6_y@W4kLbQjxHDcz?!$==c z_w3dzj#_H;Yj)AczCsq%;C@6vy6=-@cgZaKxHt;qM%p9gQA(4oG(mPDhwWmBp_LTf zSTYXf6~rxaw0{S?6>Mp`@_de93Bfa1iCuL+E&Gl*(S3gUPw@zRkt#=Ixx~|>+w7^o zqq)Pk6X0+eJP6Ce5v^#$l8uHuk?HZN(f(7}vkFo?B+5na2r|r%OF~$8rBObMW)~Gn z9f_*=7K*}bR(apQn~Q{ol=$T(@)h>uHDY{Y`scnK5SfISH6FT}&67mh@G0g1{O|8%#WUhfOfmRYUh5;#v5T@$D83(v)j!{Y>Z?Y8>QjvHSfR#x)GMb z?VoaNQqKGpmgf0t1Pzuf!tVI*i2TnJUkTHUFFmo@{AfqDpe~qyLLsh>h(BcaX$`>z zArs%MY%Ve8@5Nx2+|wC`ZeD_^59nWw(RJ0mr|j`Dp&l(zT)m3QCGwNyw=64m`Vo-d zzVJ-JZV^IEio=F-TeE?|*CJhbG)JypBM}(mn1Nu7LB9FDzXy8J55 z&~zDo$;P4+V?!esXSo4EfAVa?l;v1sZ46=zHo|c*5Xx&>x`=g%NrB~V#9|b)tLX9Q z;476MggB(c+Wtp*mE(QD_4Y32^bp##+pz=A*DO`SbUD{@*4|WXvN1;*wN&*_SXsTD znX`Op9RwJ!7ys>&PyVq~hl_D14BWz=f1=|{xc}82DFNk-JLw7;ou_K1LTj8H3htMU z;Ji%~NdW?mt{!|A9eCoWeyhAB|yvzI2ZM~rMpX8jsrCBgDFz-cu~J&G!xGGgu)sqmx_)i~E-$yQlO2OlApZ^kDOMb3rQKo) ze(0*E{rZ5z?&TiJAATZajuYNf^-3=kqJddxFn`KjPV;) zN}H||+AStiwI}fiNtGcyFfPr`NG4o}>cil?>BRzs2yOJh4v<&rO#asKI!{i!g&Gdd zzGcfco|{WV&hUm;XJFOyv9*=tAcCSO`0Y}5%F@$%sPZ^iWryCX|H@CT^JxYX)!D6b zVI_|Y?Wi~TQ3(Sgr}-rvpZ5S99xU7{P>g@K5<>_k9QQQmk6K-285O)_DOP?>+X8#F z2c$5|-jAqWq7T-TwF|FVlww&_d=MOM9I*~@LCHd_MjF9%UZ0!$^R(2S95hydk0Ze> zulA6$_P<8BC{pjmgPj&@pwh`NuLtHi8~NFa3PJt7)1@Xx4h$P|jMN*~%r1+7M#Z0w zDJXt2AHkIn3XX5?1jXr<}_or0_DMGL>YO~B#j}1{w6e5lOKCSPbjs%!>!TcGE zDFa;3#R7sxq~NoT$Y{x{@gLNLUq#cdU*$<%v=B7zh}z|mfg^uw)W87QbE_8I!%~*Y zb@e}7q}*X}zqJ|%h~)RD(gtk%S&OL_C*#v^H)-HPrt7Z=ZCpzy&ky2go7a6|`)dJxvxn>jth$rR7S)-B%bpJ&P9R zw~_sg*u*giG5CNmA>l2^_jKeVB;E6F(!lFlZ2nWk2da(p^rj21B{y$<{msiw4>;m~ z(?4ngRteau@_W8w@*7W2Cq8VZO*ugo{di^=o{Bu*_?y}aH0FzO(Z@^WXO&u*r5q9s zU{>2Qw!-74exZ8yV)XwF3@(C{INQPN?S_bp81W5{XNvn>hLUoW6!UASvy~fyMYF9B zyJZ5!tOw0r)C7joyZ>3Di2Tl~w#}!S5r(WT`;L5jAz{5JHdX>iurVp}hL#mM2)p5g zeel`(ow(y!OVfjvINg*=vGoL_3c3*pgNcMBWu6z?bm$%>Suhc|Isi zy?WOQE|W$y&>8N=DtaU=NV~oZvAU(0+ZKG;O0l{`CiN&X&Ic-tm@mO;#>N}vJz7H1 zv`v-a!}Tw}NTlyfA6%W-N-qO zaFIZQjdsj^t^E-Sf%x+h!1~|!s(E5mjC?cv8z61`8(=P@s@4v`;w)a8c|fvp+0CDroZP%V9Y zD@6)~VCn^|^|8qj5p=#O7HLS4{3LrJpJCc4(t%YkT{oz%{_^1JR{zX-!J+( zKghJyd8~9~Xn1XWa0b5ohdrj#8IJtm`+#U}q$o>*h1`j`%*Vd;nVF%2(lX5%8zdtM zepI_+yr-f-#tVa5}j>lXBcwgOU8jZQibsdhp<)B&|T;tb_x4*TYGGgortbmPk{B3`jfA;W}J?>ZfTKq>0=4+tB}{H{U|jCT+aCe^@A5od2HhG;-Mihwjj7 zWZuUgKy=FA@c={Vk953w5+LP3U9(|{@Q%{pcnV>A;+dm4E52}dMj5$z$R6~?dm&B&T$^ROM{ND%wTX{C6n-v}`Xoloa1aZbGBwhFfNKT=z) z>q?P2UkIA=sj>WLX3DD^M@%(gsB{GM#?5#1-eLw3{&GfTen`eMuiSsiZMVV^3UUeO z%cXL7S@&|3UZFa#~VRnD*6h5=rCzYaUV1S$+{Qrw-WXB8uL3#pAv&lvFa&wRR7Q;{3o_I9z z*qGQ59GzL>98xr}yruVTS6y5bVtQD4*9EN~Ft)DWWo<<+#wgSfs`MuuCkTkZU)g>q zWE$PI!6mz}Ne)8nOWk#?-AKyM$UgEo()a9Ul#9(?aqjBHQEYqm8cF*tXeH(-qxBXJ zUi|SrR57S$@D=ChgGB!DC=>>beo)FcXG&I1qag8r@W7c01uV$n7Ygz|sh&!S`*I(f_%wX}{o5Z^^pH>!Iw2Y&X=7 z|DD4<`JH#t*ENo+$t@A(o^W6Y`JZ3o@8fCdOKM5vVCw!uP*@~*OJ98-Jp>wx`X3(n0Wa6^bOa%kP}!3}Eby(0 znt}}bA^S5?MY>BViK=h9hQ3Vz#>$19VHZ$frApvejY1E-h!GMid^L#fz{ThQmmAv1zhU%cpVDq=RVe;8O# zyr%{XMQ4xfB2p$*Jl!eGI$rjq>M=rPEB814Ul~O$F8UxbPlK*4@#;y2a7SVY((;9` z-puxp#U#lxQFWVCO3DAeb4n`OSJvZa-tY~y$6+Elou6-NkvK*+x23euzO^Etd)xt#ca|v&eUZ}Ypt!pA7r%NyCCOb=x>O}Dd)@^qlOc| zRw16>LLrR(#RfC(Nek!RKJ8MDFu$b$UEVZ#ch*`f&}qxU?@^^sIZiD3+F+cID?^7R zwkxTz8(VGv9s-A{w{-HT^F_LRp&=HhlqI#A>&h=MXVRveiWoRq--Kd|%X9!_>~A;` ztE@qbPx%6I2AQ?Cz$V1BwE0;UGr34p1BLTmOqX0Y`$w0-#Tz$U?|=-Ji&E^{=f(7% zm!`Ea;hJvrt*XUU1dW!vGH3iyZRj{WM+3Jjx_GgqQ6o4T56B#on~NmpGx?I4%5W-i zne?TQYIgzXDV@HyM|)9l4)Oxo4@p-aJku1C*;B(3Sxhgz5Yzqk-VW=F$;p2vh-|lT zx*a#f03CM0&vySoB{av~JgsRW#v{)%EBxG(JA=xaK;esNSio<_-rprJfK^Ch4z z;`BJuETNTm00vNkUeBhvuop?jScxh5gVe4h+#kK-+n{N#5=xcvY|ZDL&TTC7=Lzgd 
zANk!F4Gb)okv0Q>QkLWwiAACQCQxA8)GvaQdzjyPVEA#^xDfX#%DL^oIv-sbGL*az$ z2+Y2Yp7W0nh>CM@b#dvwJ%r(!C?8t*XkH)uDW}25jlsovufY#Q_ZPfONm|vklOxPoMB}@KrSdDXp-wuO#>0=b`}U zT?_QPg-4<00UxV+q8eW+e+bMLT#jt=reQnH1xAv;~pj(FZaCjj`7@q2%3v_&K^&HOcc&N*(blB{{a zNzODI#wt}LIet7L*nYd1zK4E7A$F>IQ2GghSELLsdL(p_6hUDC6N zlAZq~U$bcCq)5k@ui$8Wxzy*UgrJ6=r@opc{0q+oaN1`tN~TJn z3IrPdTg5l~6cJCJ+VIZ5a!@(So2>~BpVznAmV$oNVt_GHA}>a8)H z^u>z`Y-Gv)+Sk7vB(W-vyN?0Wtwnha7$3ECPi)|SK!u&NeXk2`Lg9axC(5Z))CNcH zl~+(sXM2W0V8~4`0^?GLii#5quW)OJU|$g1hM~r*$G`i&?W4v_{=JB89Oott{lv!M zx$|Y{Yu!d_&?MmAAr9r)sH0$DnqO3*)B}wy8G5BY>u8o(WYn@TdbBKsJh)9UH?7lV&sk;Q} zReQr5cm}fD7ke6B9eqFAAG;qQ%&=RaV|N=4=mhrkBY;a=Bb~}mT>&K?=&ASsg2gHZ zNE~_|iteG2Zh$y1{)*59#fj-9TE7eXPDUD3%JX5Asn617Bz1? zzM94*MGsG8p^*lS8xq{Fy$*)t$g2vlQT8&mN{#HuoYk4a171-8kK4-oZpjQi$B{OK z{Qj|Kd@Q?4qX}Sf#O-8}26EaoQPLC=*rU=~Q>l3~*6mEWGV#;fUb9@`UsaPzv-rH* z4$BE5@Fi=dwu|t2jiOKbYFTnxbKdR^&3N?c_;!63JtQ(<(NDdO4RS#GD&OAVm$xQ% zOU>`{!)Hgocm2BnsjFdh{hOK@?Y*r{!@mmLLTFBYcu!vb{uv14o{C1{d!_LCtsO%d zY54+M2-k*(a&7nj9)XV2nu_@5g(7>t5}*-uG;y?IA{q1086=wMIS9i6Zw1pn*}^?> zz8J~@S{tv=k}i8+|2+%}1bZsT8lb76X`l&tM05=39mB>rayg4SjUJ$oS2(%bXBo#O z%UGRzDG+>$NrpiCcEA^L+BQ<@;4d{nv-AUp#g&}{86B6$Bl;MWncQkr6wt&A1tvC+ zMMDTFhWNpB`7yJ5@~@cA!xjIqdGIE@memOhOThTc=monqG0t7~PW8&Aw~0-_oyu$B zLqJq5)k-wf%%f2J4Hyi#0QWK$B`ryc%hSE!^!lAHBrTxF&pe#u=mdKj#N&|*11rn6 z$(l-J?H|*1*DFqMbi}!8tQq!w8$3Ds@d{L-I^FIeypkP2X|%B?c=kGv%+J4sEg#|j zh{t6faNu+}VV?s+R-`Mtu^a#~gT#apvj>d31e{$<-LH0{G^+eGKN>m$6oPB52viLr z>aSk+_jmd24eDGFBw1BB9}OJ@?ceB0jENcy#}~b#^Q%Yp0ooRen0hKQED6N9!lL%7 zcfyI#?6>haphN(b2umPY*ZxG1^b4e=AhB!fjJ!whQ%LZ?HY$Sb9Q&WDReZ+@eeA{~ zZzxjf4pH(fD!FFqX8`{av;?zguQtOpB(;TU*zAO6JH;4zdkza%JiS$E63zzBTJSwA z2**A$Y)ZMlo*VqxTxYQ3EuZnDV!bCVp3MAa#-aMW-}+l%7(IG{946oNwl~`iJYS(c zkD+rN$Qa>rB}|k(sF~?A*4t?g`OJRCf3Fzbj9ubFTM?dMh(ZqsH?909c zB!w6GHLJ;nbK~L3T_NFPfRVl^_?fh*H>J{i2us1>?gPmgM)RMJx4&Z1nffowlA5Hz zj_lnqKOu85I3Z($_bFLmmR1dx5RuyS5j7e)mkObcKQfGHW(VyQ1JTd$*KDr`&C!;( zN;s><`5-J_kMq_6teNt{D1?=^8M`Vj%S(x?M#n11FX1wh($DKb<6nf6=`hk8MG5+h-Kf$b;`NxUn4;pwv<68<2DCup$M}Eb~vR9Ioz0M5ZaRWRZVW`Ck3QBfk3p zRb3}MhhLk8R65^wsdS)slFH44y-0Z)g6{@nUTYM#vlmLDwQ~P@FN9~Ap>&twgsym$6e+v*?1!BrUGm_jEdENb znqD7(X%8eF2GKh1Zu4IIxptLo#gSmhWnVk^W29&gV8>w>X;X;{klY(2S<+`IDO9Y9 z7#S|!TH_PESr42}$v#SYa1(E@j*xbQ^o>|gJe88W(ieufe1inT&T?;^hVb0<|gN- zLa1xS>KYns8Kl(sp}I^ZKK>6H&6~O5;?;tJ{}8x$8u^CxUXY0eXg*cLXe3EULhgB! 
z^^NSBtGT|E@Z@3g4~L@@O1R_n3*43X#Ey7$@uhHOl_Z$vgSNb-!g5hlQg_dq{(U}CrhmXPA=+xST@en-Dm$_ zfVJ%YpT7c^>-y;Z>^Y~)z4-a{xL5nE-As+XVzOX)w7BO0W)JPgX)Udu@Cazp(SQ&( zidC1Tm-v82d;3vb9ECOy5#DdE{IK3+$d%hWtn6}b*o41!X46uwDAI=%G+o9Ek0AF?A?oA{p_<=~%XH(*;H_5-k z(J174%3-`f` zjaf~w)T564y$t(c)02X(Z`Z(wK^2(>O*hrlkbuHKt*x^blim%(>eAfPDo9#Z+ zveuNt(ZqFLTHMt8^Oe&;v#cv^)!r59eH2ZY{0_2~O5f?RI=>Mresk}v5Ao4WqVGK- z%?CIUoubIX*PWlRF@o>cfn;%Sg7#>vX~?V+lt<1R*AN2G;_(ycb;5qX9jt-I&i~L9 z*{593>RV8`ni0$7kxchg5I(ERDAZJE8t4yjt6US3`?5M9K>wX~#+AEg(2L^Td1ar} zXb&cedZxJ!k+pa*Lk5mVd1vh)T}uAE*H}yN5nHPV%3EE74s`k~ln;N76b=`mxrx5w zxX(!*He;~z0@dU+^Sg;mwJ!$`x(Hc8exiQghR}|#_Zub2@?E0v26gU_#nG%pi_sp{ zonV`ygAhQu=x+!hm&6b?7B&BB!vavsR4_3or?(7P?Tcw5vIXa zMmzOJVy#Zl^`@7E0+s1_E=y-7Eg2UIKnPNhzk|i5(?TGy*z@)(WA#EMgks2&M{4L( zfz>QR)p+&SE|Ak8%Ew5p9Ck6HPDH=g4B~&`FrjB&*)9vUynxO+MD6D>#}EvH>aGJh zIwT5#+?GH3f5N*5ohGHPNrP?YQwJvGI!|3Vy=FNkOT4hS4L|w7pKa=E7{d(#6vD{t zst=P*ThrLY(1J_WngA9vtM|Cts{A1nv8qQo-q4QqbsPc=*+_1|MU8vjLtbdV)^A2C z_Xu-EW@Qv7I5`z9qa=K9yZ*YZXjl7eC4k?5^m8GdjwX@zn1hXM;ndMQQq5t6>u3Eq zj1U3NyAvxHiT)ghmXrweMx*T8F;{PjNiS_D?k9U2tWSfGJ7s8kim5nVY(GPO*W7Un z-Sqtf*J?=nQh?q-JcJx~d4=+a-M8-bO+m?oGfHSW9EQ|Q5jtU23qzTBAajlzJ3__y zZEgC>vep%+QCQc_S$9yHv`+GngbV{qxsOEmkKtz5DSa<-8f~x5KGU%u)41{FvbToR zew6wzOET(LPY)dIaNN5-b;&&k8QTM+!C5wH+fa(zl6OuCHQZt!WW#t6vY6~ zZ<`bcN=DE;`p1up$||qcHXi}8NbdUD0wOs$A+a$v{m%GKC@m=wOJ4H#N0|BOt3Cdh z`wzvFJ3|UnPP#F?QnkN!!jX99Hq2h)sO_I8wA4#tf4|MP72oIDQ%~BO zQgFMEld8kbmAE}5H2X$cf=1lzqa}DCJf{TUCA<##Iy&;EnT&Uo<-(#YZPDo}?4>SP zT<`p27ynm>i4HG4w%-Y4(H6_!@!#sNB%mHlwX9A#A@Ew|2T+^)BhX$O+qD@1C>i z>+!cAk;h=BK10~=nW9`%uDL^M&F&!N!Xe6VbXm|s=mqJ(o?QDs!g0nh6u~k2D_txi zX44gq(-zrc6f)HOyYasSpLeD$%+q2wgVGbNzseruHnqq2i8FMfQ?*ewi1krpgdlvP zG#cGLhw$%(7!A>T#?*BBUX33SLq&Ox)TE>{-E=cqW+RjJjC#D2?6R!G67IC$JI6$x z&nIVfn|eUSX%T{x+IZ3F3Bg_gHvbr95nI*I|Ml*^rwkJHM`!=mw;&niRaZRnYecu@ z-DkzB>eaNDht(H3XM8S%tCSU*8*l1QGtwHL;q?9zLqciV$9bQ(G$)q@x7LozZzp>s})v*;HoP2)+Z)feA z7OY_*LCH))Cz^DgR(5FvYTCzDS(?|lW%PLZJyuH*tMf8DAzA6kiC+?isb)Sl>DO6D zp$4GS623v%LD~uUr&pR5+O%qSG0-Uvxfr4e9ImWMt$Q0()O`IIW`zs}w^-E0^+`n# zaDB9isms}wD5QB2B{m%$uM&q7X`JcNa!;|XJ0Qv_nPW(R`0WYtP`-lJzzJ1-XI>Cu zOp+%WHBrBcF7szuD5dKu-nn=K3mM+s*CmoRG&SS(DhJhFD+oNc_R3D@$_94puV^Tr zal0InB`2@dnU`F}#>aN%RSMwP-{YmddEHXI`Z&>+rPW}o8lX#BRatRw0QF)#XNM-seX`%F9d~^9e2*ta z6q{|7-WoaLyT43hoD^!mwnIG64k2#a9#p%2y;Ebjbpry07@C^3Ycft)hoGmIP`zsi zVRtr3RcA5xucwdCq<_;d4-xWP>3;U2W|C!bIvo1_1zDNo*5V>xFE+z7Rouw5LG(pu zUGC&`rUX&L_t>N%`}+>?rXC)P^;fD`X&H`nYLm;L4$nMxGm--_{7@Sb`o0g&`?ZGj z%YuhwZcK`+5_*%l@NWMDp(ZE&Lij(vM)QO+{Z4HCIVCRkdWQFe3GQHrlxnXSVNqKF zcbnT*)_zt@nex;4T@wuS6B(9IA>q;$Ba%M`f?HMQqYxy7ApnO`eNmOGL;jji5_S zKZYIN+a>&K(9{UvCYnyzhsBnp^ zmpE;XTMx~_Y0~M5f3mAdR3X;jI~y778BH!)Pf5BrF$90JmFmr9uy&i`0s|ASx!-NM zP)4iBls!U~{=Ji^L-!O3`EV=XhlPYE{Wz=l^UWsVUHdKMlJH#4^R0q#%r(aS**YUt zir;0U^xwW+Y7WVd0UmE2@vC{LC+_XCGd@@bc_`Dw+&}D8td7Lb;%RskXnMO;n~nt4 zwBPj1jlVP2>%5)t{N!6i*XpRhJ<51`oP5fC_$Cc!uk@O9S-EjOXI7l4H+PjG-ErE7 z3ti=fxy_wR;VczDJ%0;!sHo4c0rO7^Y4<5`Zjf)e*w~enH-xu3M=~S^hGa`oKDShm z#ez`oAvJ%6LuOjC9|)~oI2=7tP^3Ak`8t_g(sLPL((U7hVB6`VcZOO%cAq3tG#S_z z+`J7ggi>F#St6KVEm~n9auNTCK81!QJ$_+m+?MGgqzi}aJNtJpP3n zg-cH5{U~8Nv89{tfM|{N=#u>oIQ>k>d96&#SKJp{2e?klyo=e>FEhS=O-*;LNay|< zHB_*wXUJp!H~U`CH0Re6ED&Ju63Cg6RvO9{$h~%8_-7zxll_DMaEdsr%)a59RRr&n%cvF`lW zJ(@@F#gAx^!pjP!InXPP=y2glX1?_lOg-llp@?n#0Y^MKT-n_)VwXhnCpxbuJ&M?= z5~ZOO%ixL9<6cgskEBimsTdn{m+whElcAPDQ6aVC9M23H+m}QHc2W=i-ili;%I-Umd8AwM+()*b4Hl* z*SR&(J$0BNQek~Cx9rERFtNDQ4qZvUWS2G#gn>RZRUpI8Fs`F9+)47Z)1U-wFM(!; zWeQqYgz{9*OlE{YIxhKDK6uXDV(7+Pi#Iv+BRx`upv!ZDvca#b%!*IE&JZD;sU*vr z2nK=GSZt-->|ub_(}x3$xY>S!K~{jmNcc=27G-Z 
zLS*c1TAMY?!x0GTw7P>?56p=Lam7neP>i#ukh&gM-IBCk0feiUA0}VS!P^RG32tYi zF=@bL|IO6}V@b|DguM0XoMqC?c(Uk^f%6R$qlIe}%W4}|auPnWFo4EnupYXcBUX*{&l~|9qZp?L;@WTT0F@vv}XWy6Lo0S1=DpL^8+rCJEuF z9@x3Q^De}Cuai)$t;U5t*Y8R;BQKva&O<9+xEu`FTPGI_fK^_>>0J2+h12US60R<* z>~*~4w|YxIgiDzb`tEs;CzBrmgyg*m%pt=oDkgU zxR|DC{cuZ5CBC>Fx1~LVEuJ*In^Qk)kVH{cCXRq{A|cU`YQ3?!3emTYM-lM!D70}* zdCO;@ou6sa(LL*J=+u$M?L<$p4rdpQ#TaqPHB_^mXi&~CV5OW3xJbz_tumVV>nH1* z6KJ#MA#Y67s22nSGU0g<(pskZEIbrOA#zovRvQj{J3+0?Mew(z5R#QGZf2Ir!d09N-?q(A0wX4YYnQ zO!WMW?~*PIP?X$J>Rl$89T{(GGV`8kt-D7IV$fA+AV!LSN{qoKpY`uW@+QI4Y#ZMy zgCs@s)YlgY{BU16_g6W+(!v{Ne0Cq!a?X>ja~?t^V>l04wDBD!ko*WVkO2y-ofIGX zDOS-Tr2$H1KCi{!LHd<$3h5~YqAFli(hZDSX|FeZ0va&9Ka~vHRui6U5!9fopqu_) zGssmC-w$w6E~qTqptc=5y?9b4nekzm^u53c2>}ocsRxnFa#}$yR3v@uR9pZoi028& z1IIR=NuBt>_Yz_#$!yzn@g;)NC(V$u(f08h@^=(MrapUk*psy*#WJXxx?kMI z-zU_!;{0Gl!9Du_9edXuy*ei54ufPNt@nH+hmwSe87YSLzeGmAsnZa?`fdrvgG&54 zTpi{)U4hsUBUK)QPZ?DYj1DJKAhu{Im2?3!%pn-lO8B%rF{n`*Vpj7-mU+B;=M?Kb zCbx;-3m$x<%qQ~Y;zSGBc;->C-eqB&`kSkuT}DTQ%2}Es%Ut!1uK4Qu8dfPIWS1pQ z7lSX1>}l0&90a*%heCYcwx=$-nn-N{0Ik<1MzcRt-3%usrC}pE=A#Uq_!!!D^t0mKvA%BHpA*4tGLC!{AY1FWrcy4=7rzH|YLykA=2 za;?%z0o8N%Z6b}H-qgivGx(TnFd&7y<`nAfe=|9o_#1Tx`^w~TGkduo?LuhACI;|n zQmWdXi!%|M7L81hSLU}Q=+x6|gVev+d>Vw(r0gSI1gmk6Br!q8?<2+&fEO~3DL1D9 z7e813OqF#Ds$LaDb=?2L*4GiE4*Qh>@&+PsAix&CRD^xcmy2G&U$+y-U>5C(DrKSR zt47@M+0oZu9duHkcH%;$LvQ;I0&)G+6`MjA-F|hqez$c^KE(;&{ma#+7imCX-0@`dg{#uLu7#nm zf8nkEj>rD6pRcDrIq}^D$OSELEyC9q^szO9qpSG~16nm+h#@yv^okLeL@_SkJeIjP zA4LAAf^1)()8QJdI!=!imW{(EKv=x9kslk^6C@5tri^c_btC*O65WPAdO$WFS1FDR z8*niME+$j>gD{-hI3rg75)6sADyPRyccuht}LAc=A*hlC$DyEO}iXs3-%P1+PU4lL% zNsZUmDV0e!xDZY|qdIoA(3FKQ47w@Ot56p8dKF z+ZP)^^aziix!;J5Jdvp?y_7Lh>8LoddY?n&w3CaT0c{)END?cAaxBEp9x@3l7aXQ1 zI!IJ3(urrj8{wP@<_k#^Q9rwn??|%0xP?seIuG2E`WFI5@knyoP4 zXRLS=foTqfu)rp;+k8NHG?!#$TpvbY<+s{m+lBnD}1g37h*^tk6BP`gD};|Et(g;xmA zwOW{oR>{0l=C?e{KKnN0JW#_ZgHQwQWB*-PMP#%MH`~$XrwfT<5E(dBo&%(d`ilhh z+kX#*3tKRezz>V)$~eU&3U>OLAcVK;G=^Q>awVj{8TXBG7N#Q<;}kU&HK1a5x2_Tl zNc7awg}g%DsJ4FKx`3-9wF+I+IQTrVcUlVCpu_=BnVsnU79rem+40&lliSiBPOQvr zF`9)TNLXRkRR)|@k}3GlA|LMsDGdF7U6+`5X+cG@jihtZOq^eQ6QJ`ItxRA32~jgG zJ!Jp=OS5}m3Wd;I?2Phn)I)KjP`eeX|JoO{GZ_N@R!k&&XeP6yl?@OL(Pi{<2glp? 
z;k$o_Pd$&PoEm_d8;MZwz*<}U903I6nqemd~7=XmbY7W z@q8_8PoW;DtP#XrkGq;p)_XB;%3eZzV}qCNiochKY`_}iAMY9 z*RSc)z6mQ7|GcgFd=X6jPsE0Fk*Y$#rW$CUq3=@v>AduFdeb@&hRsbPbS68WFsU{a znSQx8SQ7om2}hy1ca|EnnQt!}6ZYc&^K4lQLqPEv=%#y%U}-*kHHn8Cw9_FL7_-JF zD)o?~wAFNHnWHjHV&G}?{>F;p>uT8LIqz6WuHvhW9OcCl^M&%9gZp>=vNT10FFEtv zh!XtrxtaiEqy!Ql#ieqnPV+dzxr6q;s{XkE)w}SCLK4X!+0l#(v;QKajsJIEuR9-$ zB=UUosJ<8{fCb<-D+z%64uE&aO(BaANb70)dCLf(bsFAI-_0xMwCfS47Fyaxal^e^ zxm#N5T`BjOB=s9~TdUz#LCIT-{NEXe!}k&q%$|p8<;g0-Gd;X=|K*yvXXPYsL0-l1 zy&gnbj+~6J$fM0In*oP0dcfx}(X2E3A55s02UaVl=m^s5+|V{pb~4oSI+w#ys2%0o z9!%u@(8f*>&%2H!84!Xy8Xf2_$Gv#e6j=F?6Nq813AaEJ9hH&yDo7_DSlzZx472J9 z-9h$h^17fNJ`&?~)!*As5`A0jd(6;TF8JQ#+i$`?uk+N$Z~jbd{8FnW@EJjarfD zvhP9fN4|`4Y~CTK8vk^clj+d*JQN5u)=}0M-xJCv$1em^` z6+=0-a29xLgFOQe@z~RaZtX&oC{kY~$bMp8&kSDa3a0XbHaZ{LP>T6LUG#NQ)x@J8 z)Lo1)LYoa0GW~&#_J5nSRrKLHJ_m$@_9Ci0VZVV;xmaRA%pA8a9!dV)28?LOlH+D* zoKNUSF9M+?cPTO>cP*h^Ah>BBI1I+<3R@@_*-6&tP?hwKFyIw9$(Iv3E&We zsa#!oD4@zNMM0V36Ga^`@R7DzU__lH!`XrF>Zk1Nl8;F|@Pq%n&vo+k`T0tA0oE8c zm7u3lbQv#z@YDZtEm;qN7@Ujxd@c=Pi&=N&sEj>c=AZfSX7vB?^cDYwq1ev*?X%Bc2oG!BYv!7{v6fS_ga{`L zu>;ld*pmb-Y(J|vyM8Ydumqp}rZx3;BrnWxQw3x?;9YcCDHyVe)mHv zfP*r}hFm#J1lc6Ckb%Ewwf!bs`D6vdpN+Eae~_T|SOqA&V4!e*RrpE+=t2U)A6-0x zt-pYZn@Kv{)gD#UaBa;r`dh_y(gh%o)>Aq0y#|17VK7F09Y677xflXQYtR} zfhr4&E@)g#Q$`}T&=kGaFw#=f{vRgwYD0s1r;C&_s0SaH2xS{#PgBjL^;4Yv)4&UTyMh%I{i zQ7ldh+!Oa8*7#3Xq^j5ZZ_y7*-0^uLDLl45aR|yECQ&8(M{ibeSg48rucm|GNe7V)=z2RzcQA>Yuml@t z`EmFKI`LBMu@fQSVd${~1Cn?FSyTL71HvG(UNFEpM2i3O0I^(g0phmS=#`T#6bODF zHl$+U9q-cmyk)Q!lYx6k*4vUtQXa-RkWf4N&jYEx^!KjUCs zYrw8#Sdu=OxYiuagpatVm%_z9V$f+#PchZL8o45Uj&{WTj2Fy;ap2;goGFaQLC9g` z_&ZZ~keL`?s8q>Wo<(2Z-&gv8NdA(i9n06i_nbtDNrC#wo1%~zjH65gL#KO33lY*m^SL%K$e$`)uE zynkwpkO}*7nBIu#lAb+By4j$qFd3^F!#mhA)<@Y=mLe5`>vhJLxyLnrepyxA66Trx z_cx286W6(HWDRj38%sxA3pq|E!bFMYuYUFG)U0!#C?EMSg9#qAGcql?opQ*~HM|bP z8U5|il85dZ9$My?c#)lo&`nolGG&8v76FlGV~O-l3Tk*=MP0~+PUnbhXV|Uw2#hNg z_{@9dqO--CO+*GHR&s-yQ(K!v6|4*b#1P zF4v?K?Z3Z_`z>R0rRL@knT<)QWKC)3z_z9!N0QZLn5#o;mtY%OAW*wNS;OLQnuZE$ zbmY#+m<;t0vLR3(qrmB?H+S0e#CqP(*J&a=$2R@%1e4^kw9`kPvad?k%%R~v6neR0 zgoKNCFGt2=#xYi}lfOo+12|a_N z%dNxAaxMtE!1p)eK>eQs_CmckVdO|-V3?(--|dB&wY28@G;t^{$?_yYHUteT9_^{Sk;jKFP^WZ* zl$88|Uo|}MCqY`~#HWDfxl_*CScvd8pk_PXU}?&YbctzjCf^SB5Ta*ANqJJB^=q`` zA;m+jPPJq;UfiPPkREOlE{n}jl<{Y#M2b>(2lsN||BgK=hCo{oXw^zoarwide+?~R zSm&FDY@j4Da)g6pJfsK`4C*^k$Y}{6bAga$rJ#?pFSFEf-p1E|V$!DkqCbFLJy+9veO!QFK?ehFGRz_>B)US?u-cpxW`5W=Z1K`j9Mg93 zk||H*8m0TVcv(5*vR}|k9-ke4p>YlRH0JJ`;W{iC2-ww^RGPV)79mAjMddcKZqfh~ zQ|yB>S#^Nm6;DG_^+p|HO0uW(8@&jk;XizXKK;*!6f*0L3khwU#Y8VvI2~bLZt;5S zz8(YXUSH)J_oKQJ3i3|kFDmv{mg zcJ|@uC;{BzzKt~XE{T=KRk&fsw(hFp}Zih-6kPgLts#HABqscNYpGr=eU zo2*afShPJEAYTFc(9ar#vnt#EFol@z>g{;1Z&bRpp(h z)zICL%m%2GJWNx%0=w}sy~r7zw;f9~IY_SQj9qEsnWEDX>Xa?wznzcC=8$Ay`}?h> z){iMYW1cCFsL@UYT5nJfdB?G}ib1}K{VWouXRiTXymVCb6^7jFL?co4mhaD!&7z%7=dXdI5R zV|F1RLEw?`IRNKM2_$>|)ZH2XcIAmebv-VaiNW$of^il4|6OBkaT5c>0zX7Fl$B{O zTNEAtIWzeIbNJ$W8QaCmi#qjq5w-Awl8?W!=}|2aWZvrDjpL*rJd=_Sh? 
zPa2T}(ghRmmyHJdK9i0?r+`=(MFPlXP2!v#$-fCl=H9x#CFXrYN^p{x4L<%E`~jl> z=Vytoj}y5V)Az)c%oF!jPTaS8yRN2w^NTiUzDV-iC?M@v1~l1-TnWJpED>ZpQ(7hh zJfpy8Fz$;=Q{?eNb42Uaw*34n3$hi|^BB2>8tyJJ&n~1Nk>3tMKx4R8Sd<=n9}!l^ z48pTByWN+*y*dXKwbB)w6$=g-+fFJlR;BVUo1~Hal^K*>S0J}?J-n~}?ZUwYnrANz zcXJ%UE7De}`-Kc6*KV>~LSKAtyIF9#;jhf&)yiL=d|B6>1(^b4jG%e`<1go^K|`wl~tE z5s*nU1;-Svieb~pyDAdesTjUq2~81bvaBP6#f0#gUdqc?Te8b%3X@0*fVP1>jT?Jt zaWSzFIaz!N#IK+3JhgqDiadC$zW1#=TT zz<4f}L_}2uwA<;oSla{l&Yz%VKrHRC@^bl@q8r$uaic!aI{k1ya#2f6(m-CZR$hnA z0X45cqRkApB_B~IP>_()Gwqqm_6hCAxkv8vF*`jXZ7o@H0F-x;nh>DdZ;f`#wx zRn?fER0HsNoW)jbjx_P=#5!aU0Gm|{ zL-a-e_JL9TQm9Gaun6iU2hQNY2`70_llCt^+2Q_WrGXKTf7xufAPWUIrtTN9^flAM zOX_4W^O8y;ZF%>bk92~~4oYnX{%|(lutt{EJ6JX818h}Gh?%0gJyW)}YRRz0!Zr3h z2aQ9lmJYpuPC?5w#7R(armKES$nTyFYpR_GMhgD1mNNR2-L)S5s*BV;K0&Bcz zca}Y3rGzFGqs#wo^iUvgxnGj%`u0q8)UX3TCmd1{6&IqH?&=vg;wtKK z+|0n~=|i#BgtztY&qj#F>%TjTo6fuIM+i>)QanDaLZZ?z>Onm;@RC(d zf!2PdD?NYQCBlkxYQEOYHx}I5hIyo0HKO2hFvjbw8lBr_YzZHNBGlok$&=TvmN%05 zurkjCo3hLb!sf{91Gl%Gy@mdfw?DY{(8W2w?^l+E$-!sA4@XRV*}R{hByc)%2F{dC zpI(74>}Q!&^P0?w39yMF-e8i}A)M^%0eWX#CvqUA6Fxy+wlU)5iK%pT++;^j8&E z*Vqq4tVF?9k3-3@iJQFxoe64$x1fq?x~ZTBuQQR6Ky<{yXXtb=?G3C(7EZMCM}kOU z=Y&ImKLV8q4ZNla#9(vSm?0W9-l0Wa_s`Ufusfb(t^;BRK7D&vKIUeV{{oZc+>H7` z?_RW_(APDQ1Y1(Df*V$ESPYhbmJnO90rvX%58okgLu@7?b`ZXhkzw9_=9}E*x;4TH z#o@DbhWdB9V(JZT7<;bc;oW4X1a^pH%McerKcK5e5h`mRzOJ1uKLsuYcH5tr+$qK` z?(e&c3#etzIEv|w1kTFhaDCoQBx(INR81iZ#p1^hm$PpKbzU&L<^-#%a0?NP`aMeP zU|}%;bp8(dhHf9V506bb{rlKUrW%$(Sx}kbT!!t886f^BvZJ--_Rl1j-^0F3f9N+w z_j{ldgZght4J34O=)TR=?M~4hcu|(**=5PVBHFcYtx;>pOV?kg@27nw&_I`NQPlZ^ z_M@D`ME6thk1GJh>Bi~Pz*#e2-4itRq!{EPT8b8r--`=jb_|9kWeEgVFJBfH4ca|2 ztAqujtL@|0U_Scu6BDeH^B1IfU;KVhd>4B)eR0BgEbMvo{7)7g6ctH_M2T1@D^l6D zc1UKEkj+XO@TQYlBS4j>+va zX{TN|AD>u(GAVV@bLQd9-B4~(Amql}gn?`A0#WbQU9wd-sDdXw&9j7Mp6qVDCuBtf zZSY@bS3@;n7H(RM`6kpOqG@+YD zc)u+#Z$B>8CwLSTC}EM3g6J~$Qcyi!em{O}ZP#lg#8=7fJy5D|g^tb8()t;H@C*f# zMtw1y?v*v_i=8g^;9O)NL<~PyoI`0mPL`txtx}2--isU}aoP5(nTetvhgx8=gnwhg zf3B0)yl;PT<%hl43JNRkhHLUb-So-s9dzgo92M%)oVaI$K@n@sfyxk8eUQ}*+AJWP zd|O*U_Ql5)7>E$KDG|2EGjtEKKRr4tOmR(5n&ske1ieNQW5|$$9Q_|>W3;!Fg!yz% zf7h~2J&;kw5oEhH*Y=Ms4Rl5_2cy7BfeJ0gq?X-7c;)IX;UQ$Ti`GpqgXPn7nY^ir zg$sv$8;sDWA|q@e#y7`alF!EoaEYz~q_kp){j`ibMXssrm?%}7U69OXD+>WkN0NQ& z%ZJDP5T$ap;_;swwK|yqUftU$IueEptNF%~6lcXv66TmnSlqkgiztMaIfPDv8&bI4 z$N2U8S202;Q(k?Q3|Jms{McHS2VsKVhb^k@SoyZ7A{&bdSkUFKFWV%|GjbB_!HOmV zb2uG8d?ngNYcfPZ+-WC|`(&vJz~=(Rm063u)H@4L@-}RJ_F@13@52)$R-|FTw8e`> zB$Fi&n0U)fJNSQ>Rf>KkXWigk{az!U-s*jDs>yj z$BUCCWe-c`XJ`ORG7!tYxW?;s+(arUpPu|&uQDAZAvlDfDuj49@>VFN0dw6`f@+CZ zbHNf?1Y>jWhGW=IQQlN8h!`z_{h2SgipU|n|D~^Bjr1@>hTZKqI)N)P+gk~K_!Y1Z z*!F6RRX$a!`!0b0tPPCFA^Dxj{=2;G9-< zUNUOat=;4jZ5x6uCbSZmK11CZ8r-mvODOO?Q{ya5B)Gwh18M%!HU{4Y8+MS(}#ubhgu6I2}VrfQdfmt^t>B5P&50emc8 z;XVgf-d`^3XR&k>P0 zv`*W{<^Ok@34Sapu*Qo+ThopF`enjF42^!-L>75=e)$xBSy%8=@`o8)yNF>wei2g> zBWqSjysDA!)qxIF*%hvL8`4Xp=yD5ruF`HFClm4oP9-Jn+&s&Ykm9u|n9>Ama?|Ck zuxasg^Ur27KuQALx=j>QL^3des@J$~g&n~Jxd~{Q8v}**+=?@WQpILbTXlH5;S9uf zZ9eGqA2;BQd^v~{%!5MmS5R@CP@%YZwNiPJLxd7mmJ1I>UF9ps`#b~X zqB6tX?8hO{w*+*%VJ}54{MEQJ3ckHy++sy!Mv%AuR>0>G|Lxihvk_CH;oA9z&n<>b zOZ{RK#dfeWULzbgR%71>#FL<*0a%cAIkq-PPT-{5&3{; zx>5#|`vW!*jY*v3KxvN0^WBhy{XopP_BUPdbW%nLTZ|8xJtFA6Yy2<63NnN<;Z;^H zl(j3LW{!31Pqs+1Kt|KqYUbQhTy#8GQ0WU4yg$|~QQea+S)VEn)hOF&`{vdQUM6|& zkoZx23O}PDn{eGIpP|a^Gq%?;Aj|E#6*;{3FEB8_NV>ilO?UBHL`>xJxAJ@|Rrafj zu51K_KNHmomaAtx_xF>OXox7b%3eAxqVF_o>`=kFM14DOtXZp_caZ-_xW-pR#gsKMP9(k%6{2iq~EWnf_9qb~HL8=r@K)2cbK**!U|`N5)Va2u}a)YWGK^rV&GMo+~G!DP^qz$<$fc z^)01b<+tMaAGGJ_Bjxvz4T!|p;A-O#d@_49+?`{uZgW*GK8lGgQ`Bai?A5wf7-(kI 
zG!^B3W0@fyp0a3V4#BjbXzwX>(vwD*Od30NDhC7nv-=(4PWmSIV|)(M?K}f;}CDE9nxytzb3D3Pp|Doi-lCe!HZMYVpfWP`vSLT z;2L~kNW;xH426uDthtky9!sa^mB#eFl<)brI~4Iqvw@6^b9NVS{CUtclUzM5+TVdW zHce#vS;jD+L_}Q?0lck3Ib1P3OAdUf6U9g24IGnWoxF;NbZ zeQ!*SFIbZQgu9RZLT9D(s5vuLOaq|QQp%#l(eB$tfxGb`ZD(CqOMAG3W(WSb`#%{_ zGQtNDU7-j#i8Pe_(S3;MfGN+D8`R)JwtO?+g!_ckOPu5u?hV}d^5VXTV2c@&gg$|J z34atQb&Sw2-rf@{brR$ss819Q{2UGa6a1L)4N$2M6^Li3TJl_I-Z?+KM>i5r<^6rXLf$tI{!DFJ*X!I0tk2 zO6n|!d<>$V@Np+sB30_Uzz4;3VAkB)gu9m0D`1G?saI9n(F2>6Oos2`1e4y%D0 zM`nLMttx&y*f>#$LZ*=`tV3@%R)KHcUB`Fl90N$!5c7Lv3LDhS9M%`}3^dO?lYX%4 zXA*t?$316ZEaA)`I}r0L#?SOytKUP(C->)?^}%D{_vG6V)v@Pv`f74{1NZS)vTbzE zND>>&R5YqD=%f5RK4Lw+b7r9o?bnYLP|wrubcBBf>f;fk%ts`1=C33pu%2M!&)M1i zj6Bt{)|D=-1^sMY{_=059m#-0JoLH(kYYq%|>VMNegg312h6zm-pWZ1Vn zfBsI8JQbJ(O#V1@jM(EDbb$~brcF(HOJdKIPUmOS4`G@{IdZ2f<||MVkpz#4xhp2- zb*2s-yx;#2cfFnzVM0kjmF?T~H_xzvZ@7Cs{q;F?zP3wj4K+=x;O*%b`*?yk@dVyh z>KHIMH?NDiUz72(+!rD~_L?THbcM!7&Aup~E_oFgRqZX%$r4V9-5sttI?tJo%nOV> zSIIdUE{gbnBrVs+ODz6?`BSkH)gRIp&r!`H1LJ;uN?`(`CtEg2Blea>`EEuHKh29o-jO2iK0Neq~i%Onw1EHt49rW=EJ?OG_Q3h7I&63#B) zA}+F^d}Y`jh4K{0T~{l*$g=<`$QY{zE`=m+ebZh#mfU>R-41U=5p0|^k~)s!z>V@* zUr1b%k+53dm}!BH6erm&!$jM#oqswE{uNm^VNya*iq3j6X#3GX!I1v9} z2n_?&jK~T8%R(yR-RxU;h>b8)CRi{rueUT=v-iJ#lDU2jCd!u-$J^}BkWLvFCC766 z0Q^O{d9YQIyQYrw_TbG zs**MRSrFkO2TH62J`O!iD!uWth#Zq2`t(~Kqw!p40{f?7pp-Wp7ujRSbo#dN!lWEw z13&H0Un?bVi$eQFQ*F_9( z8UtZ|R+bY$h{l%%S^g4*9%M~F%uTIJ##}>}J>V%Jw(0dVcV*XFrd#oGc&h$%ex%!= zdP~6s9tkEif`PrMX1Tw;*M+`%iyjCN>@7av!NGO)ky9UujUay2IBzA<%Fbfyv=)Lr z?ZA%rtyy+b zd_&U*fFBhkqOT^4HFbsk1A}K5a?h)D6BjE*xcj&-p_bWGq#zyo8xw$(%(q0uQK_eK zADUW%_`{A61EV2SjAFV*Md<1wr4nNS?5BZ<@vSn(?&U4kZ#CXfZ=!9Oh=)9$b%hB+e-KZ_c3iR1N2zJ=+{fhK*4VG-YyngPCbRK2RiowZ&ZbvkXCKNnf{TQi{#H zSKGD^it{|fZENCdfjJco+QFLF4WO76a=3g#?WRY6`y9wm4L?G-(O`^+RG#iYFxw6% z9{6Ciu4TUhSBXtY&8zp5-}&K-cisv|br(Oo#YcD8r_aRZzcCjI;b5(#Rhqgxc5|n` z#?wg_=EK@PB}i=)edN=@){4nwGfrbON~+KjFnF}@crw_LXd7a=lo)nP_u)_kUp_)a zTXI_X!x@@BmuQ18IoK1+^H=sL*{^$o8^532K$pIPE-JtB;VN-9ulHKKx{$r&b)~Z) z>ap#F#Go}epYCmE?fwvFsStWiL8O`#>@FL1CFP#BSLZi!IZ3 zsruUiG%*04L)`sD^nU)I*gsqzgg0^UIV%MQk0Jvl)f$1L2O`daqh+h1N~gc9oz9-pfS({IpsJ+o`$EQi+@Qat8xV<(P9|s zZRwP4*$I0eT6Y1C9>&%DHkeX**#`8SJ+l*+TmM8mhL{5|On%OVZinoPTog}YU~`QO z5Hb(#q_@i+@!fVWs4d7%Lz*6tlkWIFBu><5Gsl%w@Z&o*WkZ&A6Tt;_7sA@N`{Uq{>^tMi$5A5;5+hom}O1){5`L>$$M)8=cz+qJm1{7~AiQH*OeJN*pNGW(^| zm2RkuAcaPi-0BStPf`I#Rx?lOveZ!Ph#Oeofop0 zRk`CT#ixDhX^RL?x1e^NHH<%vP}B5JP(-Ve!rp72KFOyEWx<%dyTG#gf}Y{T*U)6; zK{1kqMG(oaCh+uU0e<1!?5XxRN%MJyFfV=4k3`0estwbTl^FND6~4q1566)!+?$Mx zxbhrM1hmpd%lrZiKEuW^!e^uDJqK|NZJ7tK5lh)QvGOn-&6~upoU}IU2;!8QtALJH zVfHB#26`SJ;{OAllrgf6v~>d1>H@KWV?ijja))ibvq@~yd8VLuMUMvfLOvHNI)aL| zrd51KwSN5PE1y>KWUi$M{XnVkBV#!F)sY)2)S=g2@udCVcHx(atO?Rn9mmupp6#~nItmTQL41jnZdy_MaFhWvcmpB2(iPP%^+^h zGt&H69}LjF;k-AT>ij2=W7;c#yh7MGP6tIffEPVMdU_J16+v|?l+R#6LTq!3KB-$x zh4w@DJ;{GVb3yDpk&m0x3$>YeBf#}2npCU8+}y=)5U-koh7AsADL{E@8q8?&?2|su zQiV!&5w!KcX__v%0;}@Oip%TmAnMcmQH#x7w@+kCX`XY>h0kH*ch#XJgd)^T0Ay@g z5e>$h=MMZe%XQPN?lnCGTEzix&XS58ShMRoANc|7CXi!C(@EiX*vP4w^*t)(IN#nx z?$f43TYrLg&x0m(Y}kGtY0F=cb~vK@D8eEZb`RK6uLHig8aPe7%;$};PX_^scNy{Q z+S?Qle!C&_jo6_*Az^1$#QIiiMS#}c+Kz)2!wZW$qt@i{RN4mle?&??*RXO!%5rdfD$+FY@h=-=gy7s4mg;x)@13hYJ~iY^vfhLpJ;|62^OSz>V6K94?` zo&4T)Wm#%ZV)7rT$;$U6c)BOuJl*hsz}*~6(3Rn0N$xqjTCl5-T-@vGI)wnS<4h-n zZd#QghabB1$=3T%BA^>?hSR?zvatX!`-PN$LG7u|IG zrZ-Qjj^Hs*OYB!uXgk|9Vw_y#*yWbwD z!Dijem9bo;E0!8wTqq3?mxx`M6E8nz!m@OQXE_P25JN4Q*oC1Yb|7H!e&%3hth5BW zsCyUKf(aY&aK1IiV%P|Fkl+GpL!-v5S2aHukquX$2jD9@SVx8z5pN7ahDEF8>djHP z^s#GZ?OFl4TRK4^AQcD&&}O8h}>YH=n 
zP2=j9lnMRzL=39q=L8O2+eunWx}wHJ1Z*L93DSTWU#GYme=4?g2Sk9Y$?VMf^dVDl zVZlWEXW&)q$BstUzuI*~t+nlt@>{)WtyRe+obHp|S}%&m3wf^XM(Yg?FRYPCF>nex z@)o97*9cjS4qOVcSi;lc0Q^IQ74z*~shZ9II4K9Kr{mnAi}2HEu`zDEU~}NF?N-qOTI5^ zdW`BFpuc5(6gHgV+q!7>3^pAS|K!2_WunBv66uZuAsK0xjg!U@>g+n=#3`i!eo^Ow zjTUdxoiV-nnB>}XKYS%sR7}Ea{)(SAZZMCJ{mxBLFn9EFA4_ARi}T5orcTT3@vPWw zaH`&U?aZ&|$jrN1vsNm&im08$R`)o=spiOf*t~&M4 z9wv!7kK}o!2{C4de0zov9!DrwtFkc^rSD{uVlzd%jSsc)XAD4ps{RK2qIAE}c%`2Z zHVBfgiY_BhwBW*(<(4T*91rx$8i$A}HE5LtiUdsvIlUhQD76Phl4l9LdvO^{L^lD- zZ#tAgCor`Nn`==%&!{WOe=R;9X2m83kf{YuFaq&{T#&i&y#k8Eet5pcmZpuDKd&r^ z2Q4R#hw|{^9w~;%=2^J*JbVY`ry$QThE;tqYr0DT3EMUY#L4*1a3*vC02hj7n44V)mxhQ8PXPx&=ccjPB0S8n-~M;fdarI&~(}N6r4G6 zkwgdLyr2d6IdBuKJvR>jA*hq17}q`kB|3X*g;xvCBT8}#_Kk)HW`#X-1F4fW2*kH+ z5wH49zf`j^UD22jGRQF`A&CuxIv8O2p9baPQj@=3VTG59RYzH7`2#&ffai=1%onKV z>5y4kO)eJ>e`iw^{k=j(U)KVPS~#1wzrQrGkQGteEWs)6*0eXwh1!7=k|v4HCC5!+ zP=c6h0|E$zFyjIYS(>pF#DPNlY`!GeB!v!I_2zK$8rnkIQ10m`IOprzHyw-f-e$kS z(lavCQ4MxVOvW<9ADKEWmM-(JPT|ujPVN+sSz1Q$hwf1VOGuai=>9kL-I|)2KlQQ- zG3-d-hO|`;Xtb|vt%x?uVvLqWlut(ldTa5tZ+mH@_1cJPbIQx(zqnE^0SyUC7$K1@ zw30FrwXx&f<{$rXZty(2wKKwS8QiaHLj%UbfWkkLwK(CD@j-iJeFu4;|AKTB->x7q zttz!*jNb3dLaTLH809Hu9F_)txb0CcD9C7B`V8qYFa5-F-TSpUdQ5w?Vr1p8fbdWoRf=y)_gnm>KemZWs=2x$c!^A27@ zRr%R-9kY$SimeU$22hR+?^OK@tl%HShaFIG6?)|fL6LNAlGH@ajsz|DVzVV$iUN2$ zL8|)<<~>n-#JfVdo#Fm;Lh@r*&!gl2l0M2AZr?(lOT_m6 zi3J5pR;lq8q~#iwKnewh**`wAvw zP(_=z|B@C#x0R;q?ytC8GU1e_Vmd!^*0Im(RO!tGT_E zoHJ7)yqS@q$e=U$F~;Tu0%aRok&F71eO-T5eSp8cV}nxN{3pdfUx4-^l)$n>mM`@X zjiyknVMJ}OhxCOvCj=%mIH=^G?)VRV9-o_l-pVMG8CZww*e8LXi&1a1qr4o^^wtr#A$h z6GZiPWf%C6sqxi_pNk#)Nuoiq-Wpt8pgc6nnMdh5I8kbiYdO?M>b53U+CRx2 zHmb-zF1aUM;YPDJ;sOq!3WX(#6(j4AAD#>RzC7B$y~(t!GZM&SxcLdc?DrMGFSDp-;$K`M)R_nzRIL~zyBQ|B)kaR)#H8qt09&2J& z(DZ3?dfDu3xdEE%#X#y97>%gJ&&bV`a_jtRQVg83?lP5$x~nz-_&`_|9T>Yd;Vf4l zmJgR+ZqqKj^u3+3EyDE8-a-Oa`6pnIbn9rb(wc|;`1nTKwmT@$pD>ScMeDzl&U>P5 zF&RCw28(o2jfm%$jDF$vz;OM&Fzph{l_ao*W+ zZpB0$pC94e;YLY5R)LD;#hN(1!qkVarv7iOKR?$4}gp5r@X2@i`!#fH?RR+!1joEiE-9#-ehCLZBcHZg`BQs(!Y% z3Q!3ffp>x!7SVKDWa2H!xEX?3&e&np9N9<~2h`tFnRsam+FoB{I%IT5zk;0h zM|2v0NV37@psJa^?#++CYgB5}kbG|VZWr1y%3dWboQFd!HJ^!5T>UALad{ZKXWq_O z)@n{MWwX=J{|m&#DocCyd`IYcXp46EU`AxfE0R7uN+QRhpSU;l8UXrZH~vJXhhD+j zhH{K6W4v=bbG@I9hYc}B zhVfaLP8}vI#>5h|uSM)Wh>ko<39KUG>-}X>$tMOEJIh&aFt6);0G?Pf3XT{XPLuR| zGE3wYsefWcB$nt?_q_cV&0yI2hj@Mu#1t!_#&N25&3VB} z+Zo=+}Za(b}lPc@wGO=D`$&$yjMJ-BSpeK*f1Og<8H4DffO?NSr=SSmD z8^nAjvb|V8w#&CGYU6>XhiX#qLP>pu|FF-OmsMi$cXY3XxLpLVLrm;{(q4db5tk~sgd^HzxTbOC&^vD&VF7Fs{YF$qrf z1a4s_3;vNj&1im24GrLZO^gN;em3G zh&ndd>i}rUa}(GbCgf=hRq6WuT8JlW+BCD%$7-Gv*};Cp(vMa5uil71_@z~Hz_jEd z22-^+uHq!ce%@M7b5hXb`HRjMYlLG%W}Ru(XxmoP?@JU0TzK@KIW7Vo4Hb!zH)=B* zZA=#(bU(NY=>f;SErno7)Ab)C&GfJkLa=~En|>O4*2zx>SJe{nSL(yzxSzejh<=}X5yK6IZm(IrN3;E9?LpYYEYI=Q z{+}qV3Y7l2ik!mBVUjE2H|l$Lm$uWC{R?rdUzR}fc{cy4bXjyc;`Xjph#g%r>-wMwepFggQCWqCA@25%CW(V1-wsJDb zOs`ZIM_<|C6t>yq&a8}*YMErME5^);_k(pf6$pJOD!^@`kTMQ=;E8| zDpQ>`ec_}*?0`rP7stY9$_Ht_r@@Dum*EzKHYgC6D|& zKE)KQPgxTs0#`JPwcizO7GixE1RL@fI58^xz6$&Tv!0RY5K zh7$@M9!PN8!(9-AP^OK8PCep*X(UPKL(k*I|Bk&NB1t6%+G#p&ipa^GQ0KhYB|e3p z#OSWaqoVTP)?!$Hj87HB3Y%jxh;T=~ST9|}x}t~JL7VJsbX__h-h8;C)Mrf;*s%!< zc@OKTXUg3Yu=wE^G}?#!oG`f9h8MS28#L?C5?_r%XerB<)}2(p_$w1irt0uB5YL<* z!_XIKZIG0_=NllDk;f$^>>JN9uVFZ_d}Nyr03X8E z%0H||!CZlV7Fj3&3PpYWkppbvHrwGcs3qrk^h)ym$jvswR(=XEb<@eq_AHJ=vU z$ai7IFM7rVf3gzS=+K??o_WB|1eL&KX#+lN+^F|F$qenLuGiqOX`SnhTmzXtYvd@g zl3G;ahsJ`dQ2rHkV!iBv*1*&;&`Qj)lR3K)7**M{Q{dhYw>t_d@w^0%1kQ3?R<|t^ z?xGc#>e`9Rf8tsT#~PnZ%CRBbuE`c-A8OwAk>rB0ZJAaCTLEh8H|hyV4`D%C(p;pD 
zpzDP!&B$37Cow&Rx~iAG^1v!BXp96~RUVELOXB92hap#>Zy-nQnV{tvu{k0Jv6Tn; zGRdciak)0zY4Vf`^xXIT2+P8%WHtsIvxK0df2LNpEk&Vda9SzG+QG*AQ5m~>g;U+N zxn^q$h<4Ag6Sy{kQOI7yOV4MEpalo9?N~L_CC@*fZZnqUMn!<4{NXu)gd(Lz@~}8B zRDaA`hV-3;Uu;;94&bQLjpaDr{#W{`YK%x~v>IcLmx+)#W!qepKB32#!Ah7q%t`0A zY(VEsXQH;jw7_T^{+R^w_IQI8CGxc7OgTCtQ<9}g-{fS5RoEH5}%iEMA> z5%Xa-&q?m2!AJx`SUiKm#P&GAYU^mDxY2hJ?y3gl(T5aPYIh!m?RRm z>~A7LNHNASnTH-;ot7u;lbZI&rc<%*#D1L@xJ@0gcY09kM#DOV^G;+m&p73`)@zu zm}1U-&krQYFhcu?*7hM3SQzMIp~w5?L74+1Yk_;tQjt%_;bf@}EJvjJ5kPrtt@zoz zDy^&%Cz&4rS8l}?(((L{L?`f`VgaLx~tkT;CP}XM&V00H4sy;daKndTr^RE}?cY405xlt(Y%qUgl=U-O~HVrh$R=2KB=Up=D8}@7>b5u>!XA zB=UW!_Z+;s8cC;cUwon}_}e^ddps_%o&vFHzhe89HmA9#Kl@)^J{c6XEtxdg*>#eI_~NLwUuQ}w8Czob*xU{< zSAx8fcRe?`lIgUiVC>cJ8vP%l&N3>>_Ko&*#{g1NL&Hng5Yixx2q=L|DcvRAbsqm`opnCX^KsUFU-!QD{_Tc?mFagC+M{@xiv~4;?8tDZ)oLVv z=HTFw(C{z;*9xp#iNrIY0uMZuA}T`BZTwHc!`VEsT}}Z}LTX{dXe@I;Q1`9Y{4Z`g zHWX2{A9-Q*EsU5RI1x*XUFVR&4u4@0apc%73JXedqDhT=o*tfZ?zb5J9%VkMc^rn zeB;Ctm*z#h|M_RD(f}r^HRU9$0artw(OfX}f4a1kJ&_x=-J*cHu9G9WS7~>|>Uy>V zeDL_qh8H>}Uy_)p+S}tNFjj8fuPsiZBXevk337M>^;?bMH#i7R#_cE5{h4yx8S3=P zWhvTmIb(s5`=dLPQzW9ETTF-EyUd%wpX3*D;M9@q`kXl6QPstO74Zv&Vc)g(q!O8p zbdyiFwmC*h1^10A+ldISe9~$3?{(zeh#~>2b_0 zHiS?&Rw&IseFa5SHxDFdYRb6qb6lQjz#XQ^4Mbnq-&Z$@I&pIL$kUVrR1mi4z1>Hm zG_I#Jq`?5v8v7YWf=IHGe?>Xps!em=U8SJoqGa;x;^|*lfz7FBehKx5vz=`S$9)?; zmG)+yKn+ItE*1<$F)>i{81eH|rBpjnkLGI2{|9~~vpl%lh*}4Ymqb062i0EQ{HNXe zhD*S9(E!~ZHyl&zmFB!V4YfhM%s$&u$Dey$a9%%3`@_Wx!07n1=zfZ8V{tu1{Z>D} zXYa0QTv67}>I<6@Uf$Zlb~#r{GgX8m4AE9NLX(z9SmN<* zOQ?D|V#kzl-;5;v=Sh&;c!5wC$Oli_xa)9S{(-_ap6@jL7=>Nk$RmbZ``G%5=CTtJ z=zpD!r|KU!1_EtuW=CGgk7)f~LD-??s|7YoedXr1vO>t12#8AF?E0~5MG}n-;0K2aN1L9P^xX?nCyzHLmQ#)_@y%7J(i+dIM(sKMPv&H83vUz2# z2;oy?%^?r+cvecONpxH6&XG^&{V7NY)SI%zQ~eP>e#+f=nb2|xLaAzCaXKjzw3tL; z{Bz76#UxxKehJB$&)YiSlzr*Y2*nufhaCj14`k1Z)L^_K-(22})1;kKJkNDP6GVg; z@W12lP`U|g5;-iWPTPJR6@kjzCuBRz8x~8Kgx~D`4Lul@M1n+^A^zVMnD}Q+uW?tM zLk-vpV3?^(juFC{M+j0*^r+d&vzZf&kdR_uhoYNE*9b)`BhM3 z<52xr7#YRLH*VUt7LvOM+Wr;0lVYOB^>o>s`|$=37wzWH<^m?&{V5|-fnW1HWM8Z* zfd-`W7YjaW#ZTloO8nlVH$phyxTh$sEnN9S)uqJ%2m$|RJWD*FO6L}P?&A5W?;vHP zZEVgni=JIjkuM$mBcxX5Lao*kCxh~16p%L>|B&G&Q%lrrL89y}X~Y}9$V)&P7Pp(~ z5YQ;%Wb3kdSwsK2jCc9hbYL_tr1r^KKPjGOlTL#S#6e1x6lwFAif`!kS?9&v<3^%y z-AgUuP$*nb`drqB4)h)RbRFA=+Cm5t-3@+=GU28oKu>G<8MOvgb86W|UvWv86m+Qo z)vG<#1rfU5+-#o5%D+qGXHc=JbKpi|P_~sxR|T6L!_S&6a8yumB0t7p^h3outr|Ry zm(3&?`e9FDo!r*o_jK)`(sr~zLPTnFFGis$EoN1JENUgK52OW-$9MZhVLPf}OxF}U z@j^1iA^4n3sNV=}31+7c_e_6WR2O{yI!NSf8zoAg1-A=_7#XD8;_jq|^erca+!*_X z*?FDbYZD!~J(fZN%%~9h%G+Ah7UO9<{nra->!`#xfx%#(@3JG4ns<06KB}kmM|^^k z{dY|#yk)7s9{aZ+SDv3V0*%nxo9G}9hWtK6xvr?ziQW&OvsX)fKOA0}>z;`9|7F%5 zk#I0E$i!r%c=~4=crhm<7&VftE*yoaWuPZmR5^XfonpuyBrQ!G2#2Ihuke`|m?Fsr z4*VfKpXk~~02`xMBcmw5)Xaq~^SHFj+}VQw{o57mQ*);LdA>WSY)a@P#G=Y@3MQ#X zpFy9X_!Uz~{@$kr#uoQ-x)umSFcQ4q-ia^pjwpuy(mbg!J8B3iSS%%4M!) 
z?!0^A5tGoVd409?Cwsw#2%&$PctTGNU{%1T{!m)bK*d5?be&N22Mn2K-xt4x47g!B zj>6xJ;yc4CkN(@ld9JM7qtkHUh4YgM2xq{kJ4b|5ey| z&_l_k-N~V+JLtxSETbB{^TU2(=50r7;;_u0n?4upVV<%TfDog3v=?~chjCtIG3KUA zWb`r2!sA~g=GOj1QkLxg06{pTCZ?{5(QS`r2M?fy_3#+9%jZFJfDofrJp7AXCne1$ zwIKD4&qQ}bjZU`2)<@K{Z?-q(;^%?kT2GfOrEtQBu=MLn1-BChlJ$e*&crTECi$I@ zG-{bG^}mrY%s4?-)p zOMl}l-w*eM^I}Q*(r#;v5cG#!#n|lFsSKClrNY-du6XmX;)wKG+l;)* zxY49!rP!E;l>leedejPJs-u~m@368s(966h{<_`T;+aFh>55J-Hv|cYt2c(5_qe>7 zL}$+uQ3Yt~kNZLA7OUe=@fZsMZ!86lR~MCV(a`IQB`UaWRk9nU6>w2L0Uln^Q%<{n6U#YlU)lm?^5wbq<>Sc_4dv^eXX}&9 zkm%e+xP?dec4CZJCOZ!KJ%hu=-A%jS{=Xd?`yN%i9c7yZOa`Mtj zFCO=7Cj?H8QR8sucY$*aXcUi+;g?O;iG_1BLNTqS}6{JjIP)pmsssOdfq|LHnAlq8bcK$c(C33*aW3M++0`21XY zJ@}j53;1O*yKFN2b|M|RkaXM8 zvFH{uII;)6I+;}}NH4a~ERNtNGmfUm2ja5g#6BSS9`ryL8*I#oSH0c=`PONVJD3w6-USvSTod4m9K#@pN8;mINy2twqOfxne3i)r z#_o3jtH`wr8U1J1!it8L$^Famkj||WUGZ0>CzC|SK&X={KhoX^-m<{8Kb9#eKp0oi zKcSdYDEBttz)=#|^7Dw@3i;&d@Io=Dmtu){Bvp-cy82u^+}rv}jqiC{$!io-?n^QN zjzgrz+gGdghfNWh+Mc_hKYYBww{3VQV{xQEF;2jcTBXe^}V!mXstqdF>xY_VM07ng8e^#m%2-C$)cP6{3x{>4UgsUZ5c>l>C1{AYXwgcuAo_ll6`(Iavt< zbsqcgDNx8F{~~ti9Tz(#RmBqK-eQ|$Yh`7WK8*c6ea&B5RZI~U5H<+Y#XVX;E)1V9 z`ES zdwh`>KNs&r@tVu5-2iMDMmuFt)S%YYROQ)lc9cy7X& znZJ@tmoHBRT_y5m7k1shDdfABOoj0)@aqB^i?sOXtwGWPZMs0x2}n=Tz+Dp%{n{0+ z_LA~A&x0aeWG%eE%t!KPqQr$bg&c=!2-|&hJl%UZ=jxB|U5PAYhZCxRqC1Tfw&bUJI4P%wDgGPso=ppeA%B*z;VXZ3Hae-c8~ zxxaXnBUTRc%0G&Oh}uXS6W5zSPO#og-qlzqx5V`_Q6l*Jr6)2L^Gdp@TbWRzUbGk5 z+fW&6!h^HXwQ|ZZabKh?2@2zqh3Su!M2>%ZFF%T_C`Z79%lTNW=Noad0m=zK<{sQb zLO-%--%lCC4Y2TM`#4;-)TS>%ctv*xSFe}AaGo%ewPlZ!c8Z_Y{E|B)t>LW2I^aqn zQW@o1F5TI^O#UP24EyO9b{~BWgieIctJoLSRsP2RNVLW18)5q}JBduTYAJaP=DS?4 zfZ`D~;f1!R^|tZGLmqasiNn-De6G=-6R)b%zZ-^3Vnp@v>cZ-lsE`;!0mdoW+6v~0!F)!bbZ%@n$17!J>C$dD#v@*ODKJVrz4ZFp zpP!5+p#SVe=uteWg*V_yU!g3~%hbzNts>qYjUsDLr-n#J3FY6y2nTt=p5y-DDHKhW z7-MJnSwKTe<0fmr(?Be@B=pagWyFFM{1yGDK>iL1;hBNs1h3TatA_Ek0VnKxPh&sR z%t%E^B%8}q)RRoXSMsd}1=7DzO~+l+f6M?~*%PX!_~ryA*+HA1SZQ=MRRWcPaABE5 zRnK(gr?E1zhnZRWly0Bb+kIP#h34wb=ZB#;&>K5wl8yfhceEb%PGgrZE8#Fk`&Cf| z>ri(oTzU5aPx_|3dANckB1Uz{qsFR}OGl)@z7^M>5Q=-AIe7?kaGZj~gx_R~6Lj-^ zK;`0D^#I27fq(4uU!68p)n@d}EtTqD$kYm^>-9_&wYJCoRw?vAdF>N_+2%R1GEjce zD8|B-)9+jDPHPya)aSEQm7_F#@W2@IUYJ6D-1mn;O5kxj!ywwxJR}zVa9@O97Z~S< zPvg$rT~UY_$y>$w25sB(Zp;aL2$#~m@5O#c9>ao}ye;T!Pii$9a&d0MJCrpJ23~pB z-M|SGU6S9`vc3`c3xns5?>j_eqj4@6z+9k#b&63v226ady@6WAbyvO5K|3<7{5AaR zOJnHkrcf?JFW;-L~0 z{zeJYx{>mZiXVV3oT5Rvz!4D_HbAu<)@ylwp>&LXtQv-vHx32yM0O(J(#MTvjP2eB zkauQ)O`_cb{j89WOxHqGt+~C$TS)W6pG7USV1-+wCfjJjnYRmCsL95bON>E;{B-ze zGkP@&^nJJDe{9v3A^q=4KCzTgy|}|RFG=R@RS972bez!!MAA*veDuHPBWR%E8~4J$ zuWjE$Q9hpch?=R5qzMgo=`Hn0u^V3CYS)^ZyKFt72YnmQ_&TN5?tM9&WEQb#Ke{Zc z!_U<35aMYyUEPq{WD#C$SZF9af<}(^l@q&c-V5(T!0>*;X-eYq`c%BL%yqOUB@XoW zstd?!@{s!^;*=5l3$v;uNFGOlH~xRpR=}3h^g0%zCNzy_o&eYkAk3gLNKXagIX}C$ zSe5L(>O1!c@1B#PAz@Q7~a_ zL2fxt2phplXk;>n%`e9r=Fj-5psHJs%p&=!c{Vngdq@pCkZ>0PM}2?yl}@d1O`SR6 zM`bKJG=i7xIGyE(q17ARML>D&A)3;6y8~|^WBy!bDGf|>#n5{hrLog~%V-c>z5PS& z_c#0#FR)6%{AZ$RRT9cPc?Dnz)L)AF9mtIhp=aCY)E_V&AZ5bX@9+EO)=gxrY866b zEqrEShB+06wWshg3 z^0!SGb(1gbzBCZEJUoN1qw-^}hxHtcTELHWYh+KA3LDytTZ7Pn^XcFE{8#7a>~CtA zc=kDURZOVXpC4i^Q1n0pI9SM9`;_u3La)I2_?mQse8Eg2BCiEmcgtjcp+V((MZX*y5Y<8pPmwS>rM)l zutWck9{U^?c|-paEyEg0iwZpZRo3XQ?>-;qMH0iorE5ThGvNz)qAtSPm{ByM<^^YX z3$H_8v0U$7*PhKp;kU#HwRuV1r-I&;0KXif2KUbZNIw2_^$NRi`|FN!Vn3~%$)5)H z+H6)SB5H&EbyLUwY8j(+_*8*t9+@xUnLr5jIzb^YI{jA$wuLO-PH3W93Tu~tc|hB{ z)2Ae)L{h*1{UWZ%(==MLpLp=Md#)>DA{fUj(@ji=%rE%#)xA<7VT(T!WJ^b&8$FG; zIgPZ-BSzG8etv)tvWSw%%@XeCo<2DoJ-?vc(g>EPjCGPJLkRi-6O!~H@Iei%9yUxX z&kmr>120o<@g`%`uam|-k``Z1NN+PkK1dT+g~sh&zmMaAEAeqr 
zBx(_#drkSRPnogPNCC35fURN^fbj>(2F}Wnq4({!b>A0#J=1rR(sza&zU5$mQ4%7; zy0H{W{+WYLD`}=%C$ejpxmz*OY--v=usra&RDm)#3-8d zv{MR4#^omU_Rp5dFloENn)SSu`oB{m@gv3q(Hb~@c2C!x&jN1x?@73@?wf)vu0R&w zW(&STl`3~TpCV0vYN}c|o1T|*%?ubX=9^ng@5UAMU#~=*n=&#Xu+_b%Az$k`En{`es`dSTZ!$8_$!v{v&roFN^LB zb@d8M}nYVzlka^AVW)LhM|TQjB~hPG4OClQ&LPB5_Wu;Xjg>) zh2VuPW6m&w;)dg4#Zteue*tL4{Iu)LoclhGa~yZTybulauU%?~v#7~O|Lb0Gm^Z{dNr(zPio!aZ=`I7eN}gZrJ*w&aC`D)o zD!phFC0H|#z<8JH6LSELmDyI%m9I7YX3mIY4u#C>TcD@EorF(>jguY98}o(r23)Ln z>nterfWGcQt!zmd-?U#d(D6?eNZh_P?o%N8}60 z8W?X^SBahf`6nRQs$**5qF<00&9O3n2`6<@U6rw{BUT=6@{0k_R*U9V?QAfu3v%L2 z+l>Uo78%eCHYT+~yY1y)$}pe=uIO?H^zn#`(@We78HkUECY<5L=*qSsDinax9gS+J zjlXMBMz?M@Sn~cbfoi@Bu$AP$Lq_M=rr!1qFoJo)Kq39xkb|Lq(ss44q2}g6iw*V$ zwP&+!v*fChNLjwd<@K9lv8a?s3&?1adI3rO8W@OX5Jwsp4OnI1l7FOZwced#dAjUX zoziwNBRN3|2L8*GyO@KbCz2l7S{?P_wvY$`iqNrRv*qU#zrB)BM~qWgKxhMnJtw`> z&5FCkRE10d*%P^cqQtLev?>b+d|Til_=9{ipvGgMCR|Z+cuwrU&otyWzYyR4ZsegT z2|TPCqcLXwk6rqKFy~(Wr|?I83IQ)0-2jsgfF(XxhP=>h?M_xwV018mvS~?IrE694 z!yPXBFd2!r7A~8eO=>)b%K0V9${3#7zF}{HnuoR-{A;x}8=utL-|!QFe?VyauNInZ zqf;w4#_XgB`MrKdF0^#$_mXj%O!8Vqdn;!%R^O+Df0@a&5D-^A?x3_0p1X;HZjE7T z)PcWhJpUP&!&t9bMsKf3W;@V_yOM4aHoH!H%wO>O-NJugz?3vlN04*ic(!(!1f6C4 z@yb@8B|{(JOJp4Mrhw*wzMAHii&o8}ykukpx`+(XSInsEZwnQGr(W0n7??O80Ds#l z-T2!yD6zmyqr&7G8&$Uvz#s*FsU|-z^+r!N20}XAl~1z?*_2y)^EF?c*)&BTTY=lw z2nNVr!#3Wr!!5O~+Zg09C--jZVO;TSU8F|9AM?T8!3;_}5migsWL5ts{k^FF`SkN^Ub!9m1zGlkd6@ za);m6=A%MJqjNpni?5)3_TBQ@ZJy5Bdpl?GuNNvTRnH%8U9h1|9iT4D+>!0ASLDM} zD|EecTyr-X^St8m?C`8G1S4fh9Z)V3A{9>2oZh_}bd2i%LLLNoWC9Z^{-PqY^Sht& z?FFtS5QN*@1{2QE>=5_YR05e1PyzoW=t3METxuECm6mrIOze$yRf>}0?`ZwA7td%u3w8^$_v9MFMdU!|is z>TD7bN+|UN-WNkc363XdF|ksUoPS{l+554&=!-y>aYfouH|R2|qRbKxDATL=&Y$-d zT}b^ga0~4G3Jml4xBuijZK8yQi@gt??g#3jx;04hmp**7{zhgezZaZ64?x=YTEkSl zZ2-bR@E@n6I!Q*-83_!hKjT$Ss=`IZj~7v~z3|*WJrY^rGv~7!xiu^RX^xeOm^$u2 zgQw(+9R#L;9_WT8W^-kTZe6`=YSrLceSMtdk;e6*O5nn)H_C2mFK`$W=#DH(P8i9b zUly{nj<=wyDXDFn(d)y&MeaTZl?U?~c$e&;ZXcr;DwG1RxXI<(cI-leETw#lA)%_7 zs_F^5R3uq)`fgFy(2}bF;^Atj_iD1ZubZN)M_uzLRI1x{On2qf^_!T-T)XrJJ6?OS z#%Sov7UOc$5z`|EplME8Rjlf1a?7+o$3Zh#D%D54Pd#~(r?*bpSSZfeLd@9y;W2hr z_J!?LTUSB4^yIb~$4H7fY942j`cvwC*htp*0K=6#nef0VqCiePhg!YP>j#T>Aa>e* zNf`Ame_K72Q#T&QzepxQC7-DZ;oKoXtV8Y;5|?$$F??U44KH3gvMkLb&7KI*oybhk zP<3R->bZSR64$BDij)VemQ`OmZXWfR79m7zE1tJ~=iqyK3d;Y9mjR|#^DH5n>1qk+ z?6;i#$+DwC7MMEn#1@PED^=T8LQOl(=H!xW0bN8uOP*PDOvpgZ3W92+RT$JwFV#zM zX4cEsZYiYcx6l7L9O;?upYR!`SV>|_fuQ`n+J$^>KjGNZEOXqwF#cv&s4Pma)xUh+ zd(YkrjR`IND$-3gs^nD~p@<3yMc&1!ZvCrLQ`6OE1_&e1(&p+NY@@f!diHR7ilN^; zi+ptH#9Tr>9gfeKN`$1CR_!}xNL0c^!lcLFi=HmmPl{VZFW3hG6o_G;BH%)b8jDUC zd*~`ZbT4VT*v(roVo2^X%==xRKsxRKMYRr@w{bEWKmos&;^9;AKAf_bt!=kG@f%~I zN2^vXl>xxmGRj0QE)W54T@4WDZ@cDD7s4-t08fb@UC-mVrELFfLtB<5!LP{8>a5$_ zMA=I^seipUN^Pg($ZR&gy;d#vSIltE&xG9AuyG%oSL{+ynm|`1D{zeA{3{bt^bVAK zy)>@(tBQ#CaUTj4|1-7^#v8Xgt=bo7rXC9h$sp;UdyT8Z5v)@v0i^&eAl?r)_N$=t z;AE!YU9pzM(wDFMk46{xx3HL3zvB)-oH?~x9$m_8oiR5_waDvA3V0?m;$1O-V z>tHaDvyfolkygb!LrZ6-d#J(v!pNe`5-d>wci$MU4{xw>N0Na{^^2268$$*9xk zyMCA+Jd~NbFucX-y7_H#oq+shEVHwh*U0%l+zebYCNhWl><{eRG(5Y$aKG-+fr3w+ z{@c4F?R?ku(&I#D$izP^Ub|Av^t|Bt^ezXo;0IGzH`gwh*G3k@7h!4BxxSgJe`KF~Uv!jCM1+#%XSlm@p=_m_|I0(Yc}eeIz=78hH@%b3YYey@QZdI6mWcaOln9 z8B8&6OTw@g0W;gX#a4_hdY+XYM-A+5erv~0tkBA1o4Zj*v(UV{iMpVZ!%}Gd$X6fo+%G}O2AC>)=v!e>#+Ez<2D>)FPY*0A=Yl>Z zuubUEq}|?n_t4_mHydC*l`gHH+bpvO-#*Z-azFQ@1nS;zoD*9b=*HCsp#zd!dL2#8 zUgx3Bpgz_*@O>9C=uZP>RpM^WZ7-h^x-?9AiY_;os+&j2(kI#&uq?NaTux++3(}2a z=5o%G3h(%SxrbL7j8&>+o6WjO-0Scw%034YMXUX5S=o>hdb&CAaJjz4cQcI%%k{Vl z*A7l=u)wS{VmL>3_dO~5rlp}2lf_Jza+vO3%<-7>c&|V1j~gGfywwpC5)l$L85t{e 
z&!)10Oa;Y^T1d%%G$VTvCGZcP-L)}i=Evg7|8x5?E+qW*@5oQTP|;A|yh!(NEy}HQ zzPXX9wZ8Xex9xIH3xGx!&pug3v^>0R3BRrAZ?rg?*}U^uYp$g||MJO2-1L{YtT#j)tfawjBi=G|4THgw z+!t}vf8$BlSH>`{PyNv%$@JxS%e5dx%p-g1WC*Ti9lA`sabtSZ-1$lL6=%uD5X)b{ zHVZkp+p+P|0b3G)O07gMEe~CM`m?MiG5$#cGb~}Y0=TKzP&aO^A;Bz0@x3T02|9TT zesfjh9dB#5P%6YFIc#GrR56Ds%YdPjTxR5zEzm-N22GaMeM;C|zp@>Ay&5~h$mPO* zY--n<=0S$E2{DfX;W{+@%<#)qe+<1GUd~2*GygOTQ)EI3?{Yq}z>El{Bn>sdt9K!O!ID_xh1xqc@Y;k5Br zNW0xah$seFTjarqwS1YwBid(Yhjx_H-vB`!IuOdwq`~#9iI+?JQmZZ#6S8$Q#JfPz zoOnQlPOJr{t804etFko{6M7|Ki2hh%DpAP|BOfq$V9x<>YU-JU+|yFI?Bg`N|g zI6>4#u}}57BBw40Pblpj*(7qd=i!z2HnqOPRo2XVW^j{a(1Ny4^ttZaJ&i|z_tI}> zb$#i)Hi7L`A-_RRlWr{LYHiuUbJflc#Gv-x$*^KUjzmPTpfPEJ!_*RPB7+esWQopI z6W64zT;nz1%=fkVI1n+sWW#Fd`|rNxTI;)U4Jvmw-zz1*mW2Yv#)G!?JF;R`ChP6n z5GeZk$8h^I+P9IKIi`HtO}pe4;}6_%_rzPD(M5@Lp@=~1Sme9X^R#A3w{$!u94$s6 zRuQ7Nix!-s0w3#l{_~`}G5U)yY7RWGz@90kOJGB3l!|r{)p}qY<-!vDr%d+;ddAtR zyy^FbLF3H&!cv?UeIi7*V9ZBmqnHT$Iw9auhNOSOUuoUo+RY;JLBPh*v2DLFP6BGU=6ZnU_F#4IqD2ZE6*DEKZzGutET&grqfAfv zuiQcO3lCtCa3DTv@x--fi;ma4;tSY`Q;6KWP%wag> zc3x&XKkU_JKRP>hgjc?92*N)Di8F);Ka-9LPj_91cDDeiZ^1M8j&(n&tm#bN09EE6_)gM^NwUZj82kJ^v6f# ze4hugvYf*p*B`XGi^_D?iwg;#@$A|#hVE?VJe*6d=WFss14LWsguU^1A;T;D@38_s zH6zrF8ENg9fLpyw??RO)9Q@nb;P*w!9E&UtC^eFt86{7dpCDB7gAZQcX!)8OTw56p zsJnUEM^I&SdhV#XY#&^26D*u%Ow$-{`v&~uH7qBgJ`sR?0W>J7}{pkR#h^|u|ct??3UkL>zc?b%`5`3v9 z@){ZY!k=n7kcwRc0geth*ulTPX_#>Q2#y2?5M_D$Qt!|V1-c_$wB=kJQ#`@ynTJ9F z{_e;F#pW+~pXDSkvZj;=L9b%$Sb|aV!^>VdG_t6ig0=e>0>>SMo2sE;t?kCEIbS`W zRE|Ffs5OT_XbSXu=lV~~B+{CN%a%3+hp92pA{|vRcj#!7)yM%G@9vwc!(-bV`<}LL z#|SsQa}&-ax^(VtMl?pDR|mHTvz6)xqa)_pgr{$>cH%AmM8emvoe4TN4}^Kl{+7bC ziI|Y?jQ(W7h>`_D%y_6=>I)&& zXuI1q+@y#?&j)dHjI31smaOh>9%-HZ=k*X;Nq*qo=9|xx@_Xd1)AJ&86Q?aAwF4G( zMZ6^V;Ef!&V+Bn;S~3Iqn~sad_<{wXiHfLydQcW*<-L0ODyz*NF+Kl_k;@l! zlm@YlyuC??4M9q6IIuSY+E@l({Wow)@~2x#gHsq$Useg*KXV3+NBV=5(Z@SqTQLMOAR3!Xo5?`-|^EX1(RFtM>ly}tO--4 zzzzYi6cbe+C5sH5FET^@m>PcvnxzuCh<{s0+-8n`0)nl5-vN^#-=B?!OCejM#qJn} z^4w-xE~)A%b>_-TR??FP2k{8fkzq8Aj}oB32%*43%oUGQxW?-++|s!0OjH7+MRPHa zJv$qr3JVK6?8d>(j@;x220aioj_kjS)KoD)mEgp2X)^f;MC@x8l%3NQD8t(tRdT!v z#Q}%bCv}pKm|DRy772ib@4Tln*jGuR?9adHrMtiKe>&b6Az6t)K(Zd#KcrSQaX2el zDEoH2%)n3Y-CSEjJCdF>Kw0cXRSc2IQ>?9Jn>d&+p{%t+>`6BSmP-VWu-v2kh5`sj zp6i!+Tj%@l$LwY0y^)RNRM96AAB0_ssfxd9 zwFgjm$!O+mdVF=+itzK)Euy~RuNO3EYAf{wzf&I}&`nVq<9Jv~K1-pl&4a)9W;I8= zj_PA3`N0fU(qvr^;iZovfTApMizJ^BXg%tRHxteVi_@e2SkAh7(+2TbME`G#^T&hz z0fiSKght<_!l>1zLQ)`%fi0R^ZOY`m z@R^JQL(Ftc6$?QF7Gr@f$4>xGHN4K^-0RGMA+mJ-Gjbv!h-(HoGp6n2DTNFwkcjyg z^t}CBw}d$N^dtNmEVIXit>#M z6!P#VJJ9R-=k>_Rt>5&$^!~Z;fXL%oAp>!jbE_djbfMmd+~s_2@eHdXMq?`hJ8>c& zpU>L92{i$|gFF+`U)FmZMJ*-J7i~DrqaqOa#Q<+c-e(vStw-al|8VV4>gyC=EJ3=! 
zxXynYOQPsbOzTQ7Y*N0{#iV2qK8PS;hJX}VQB|a>-|A2_+n;!6%fG)FFxNL>#BRl7 z0+$?VCKeW9dcAQ-oEkrtVWP2ta3uf-!2t)P&}a<^df>q|Pr@t1=QVPZb#DIyNj zN&2s@t;Z`0>+gY)^{vl7u11ca zq`Oh4QZdFGGa%}#ez{Hh`wTy$hd^R6<$BpiW73%8HxJ_*h6eRIF$v$f#8Rz=#NsP6 zri4J-Gm*b;>)j*a##Ka98Ha`#^UTp))~*jqXb`doZlbs;fTf`|)%Z~_XJHaKl@8?m zPd~BXOQ!@K@Qi+qi3DyDg!z^DkZfWi=hoeUy9qj@byo4b0D6nkPJ*3B%hBNH^Udq9 znu>S{my;`ZdZh71z=y-CZSP;(A@HxB{?$W3 zgX{IG<}Yi7`H@r6z1%RA(K;3piQ$b5li{CGs^!RHF13AE$u+$v>TD&fqm-}ylQc^2 zh~mKkY28E@P9x(U7VE4MWbX8l`L8zbOsS>g^n)%fS@Qs8Yf zA+M4*p7{O_t}Ze0Tyi7#tlvGLet{aMT7RVJ2>mZVJS=)bBJ^_E%U-m`x- z$_S81J^=sa55gpb7-&=wFNOarZq?|#(XDXVI@OC9k?S9bC-SiOJ+2sBGjiA3r46tJ^YSmDV z6|gZ<8MO2R#1rImZHO{?)y=Ty?cZSuQN#Kj23?K;{&Bn1AMU2(c4UZ#VO%_Umc0(M z9Zi0Uo9HteR#89}IUgbX6tNwsih|S;W%#@>g{#>&AZdJ*Jw0Y4^N;c!iAT}7MsA%i zdfljLao)^qF@I*hoiHXzbe2|wJ;*BJIC9#|K(^#4`(Z*rcCN-hZ2z<0MYOKAh=!Oz zj3aZo=+eC{tS1O2O(I)c82NRl0DH~C;6ouw4FRS82P`a@rx*x|`hrLM9ltct9K&v2 zVs|)kqB8xBID5@`wehh)a|$DVzKc%@tqxqq))XH6bQ{)8;q8yWLg1jqoJ>{8In5E{ zskr~lug7J|-oknJ`3Ue9s#3W(=7ta++#C5{H2n51!Rik)en|M=p@Zj`ZQf;}m7eWa zCeEkrN)i6_Go#NeIl?%hLGd=wq7j#P}&rf!k>d z(A-}BQfQ{l!bfFIId0ny-XWet+f+v`BPcLD7ejzFE@jQ z&ry!~j!}alGiF1L2}S*mu9IFQ zgpe^-zP*`DB(dxJ7L>&&EajvyA zW&P$1hfQsR(Jv(?6S+Rxo}VeyZK!8~`Z z+Cr2Zg0|y|^Ia4zq@yr0zx~%(C>g53zYJNO2h-vT99KFl?(>~kiEx!dTkvhu%`AV~ z3pevvH$xTY4(45rh)RZof`Nrlo!8Z!^E6*ea#e958hi}Ll0BX1%Fbk|cmAx^(8IdI zMuWWJar+yU?Ry_?lj9gS29_c~++AP>Q33aqbTjssY713Dxsc!~`{tllo(ff)df;8{ zIzhkjx^|19e+CSe9>}s7dr`M&VPDgjI`*lUGk5LZQUy__rYO?Jo{od!8Hfhl3D z|Nadbi3|A>*thd1`hmpC_u2s5m_F1c)|2LObMSc36B#q5l~XTA@shjy*S*R7sN$)n z0=<)i#hE>b8S=XE6yHMr6Xm7&KoS@W`bx{`@wO4AS;Y?=@^GiDa2e_38q8JX# zZ~uZ3C^YPtm~G{bf1{H)gTYAgy!lU#Lt`90A&+sDskFty8nNK>>C@GF=r1xxCYA{8 z)bk)y_yC^7tbdr6jDJ%(Z!$iRxGR`v9?Y~B;LykT!cg4nay+BeQ%4KGJSn)HjPtQU zj%v76csifw{jo+1!vd;^+g>6RMEd>X2jHo(^L=dnDQYe{$ zSF9O6q-BontMz+z7ym2{PBeD;D|UEH3{voQPvas1Ir}{B3}rAG`GTBBwlU%xqD4xZ zGm^%xA&bGtE}GnajY3tFLk(6;P(~u3k)+ZlvW3mP(Lf1 zklWn1nO2gx{{~mQo@E)q0QY;vU?tvlD2Qhtp?8lOR?e5l27f`l`&dWU=&g%DU8^7! z1DXU6mhL@9DusXKwpi@>>@K~UC5K%N5iSGJt zV-^Tz_&CY;-?b@`gExPEa@@QpkN$r*Pc&K(J{5cXLQf{%oJg&-Eu?C<3#yj~=)Ek_ zBFq$=fl0nF1lVW{N6iN8P0Z7ZqC+czO(v|u2I8xMV&J}H59JIjlDfY}pQZc6Pe>;b zt?z4cJP{{dRIHuVp!KQc{6|aCy|wXovplVMnL7?DQ=1$By1FU~UN4;2LZ6@uXMPs3 zF2#DyKoNSbS8L8J8tQ<`Ov#Piz2X8<=a0=22L_lKA{k!&|HIN*Fhte0YnW!}p&Nz{ z=@O9cR7#Ns8M+&!yBm>|E~QftB&B5N8oC=v={Vc(J^x|N?6scfy05!%ohKz^u`H0A z-=_bDObfAqck*M|A2KSNMa#ES(e16f`ZN|?#HeK2_SbJ1prL1^(r!ft=VDl{tEp#nD+RB7})k${9KU$f6+`CUFj56UBm zk8x?U%d@o5;wvy>s}!ufO>@2zLMbOt)3B9LpywG&T`z;rk59d(4jfE}f=%n_*KS@v zBqkM+RJK0c#QC9HI#J%gv;mVOq8KSQ@&YrhQNOo0s{A(Rat&7_i)wlguZD#X0v9)- z3OAx)gxr`E;uu9f7J^_kvIkud9}M^3xa7q3AI%byJP_o-;##A=7A^Hd?4T)n4RBEZ z<d_aD<~ zxz-hI`D@6!jI4X;%5Fiszfht<8cDLDj*+#Ck-OX`aXK3jzM+sCA82RK+u%f-6ZV))MH5a_Ae2_e`}Z( z4LH3AEEPJ$FEY^ed+>qM18jv6)F{_cS(OIzbGblt#jitI|!caLdPQ^qpTa zI8$)m>0Tdq;{Qp4a?SSc?njtDxUmUporRf12IWhdKu}hm_K)wcF?PrA0bnRec=EokCr&KejR?3qXS88>_QExV!;a)?rz>DsG! 
z{fAg+tLM?eXdKNe_G(FVd!dML$Y#7i+2K&eT5A$9qH@Uh?fGrw8M~BUQO_UGSDwfy zw|cWBk^dbR$N!E?pXo*K4lJ*4Gq{{rGuzt&gM=f!_^_Z28v~)&k=#qlcg%a=Gu>m( z-gNNjOQ}nls3sQS9DO)mdcbhRdh#aEAx9dguOQLJ=G+Sd^+;UmksJ$tD4oMxr=D6X z8ZIJkPKN?}7vfgyfL*XMV@z^YdUg;8z7ieav;cRi{M8qZYgG32^-Y1hyN8pDL6h{{ zkB(hzU88f1e)z%xhhr{hkN=&T9qCL2PB72tfF@Oac{X`7%I@W{KZ53knrM_%;~JQ6 zzBUiTVSYE4qCoWK7O^naF97(Y)V4Cc^*H|ZPrWAPbOpqr%A4J#R05idj^Jo9_i?v+ zaSMbFqq7l0ojliDJbGT~dD)cfYa^Z@>*+wB%WrOz*5P%rHB95iACu7lR-X_|D2 zrm`Ndh9rSY@`|Iv8^uc*lrj< zndo?)oFXi_z}Le|f^(e}1n&x(%9-%n{lRXnablsTapKH(xEyD4ibmt38Ql4;YGQpH z4xiN9Uy05BDo&k**qt-BfQ(OClO2wAxGF_sP3amU-G23|V71f9P5LutJqc)K{wj+q z62W?+L3+CNM)BSF{cPxA!O1={9h$ZJA1pwTK*_W z%JI6o6xnt#O}SD;+`zHs)yp}xMKLWtk|p^;RFAL1WkAR>^}gw`#3oM4;<>^|efizY zHc&*wQMzrHMk3SART)$Xs%K!=d10PFC1M|uNW(maN;%d$6ZrjVZ)4SF#Hr@+!!caM zCGQ6+TL|`~ssC;$j>KqTpqNoDA`8qD=p_3J%vQ2tyFVV5h-SWUlsZs52CzPbvYC`r zlD9m4W~Bsh7aX~V<;~*hLahYCLz*`zhg2uxop6ASkt;ML6vBz3VLvoM^`JMu7xph|ib?Tl4FO4I6M zcO+@Ws$6D}m9(7G(Xn4V2@1LMgGrggTe@VrIJ#^RCm;eLqw}wx8KfzB=yVmFEW`I> zfFO8;S;Bl)e6{)fL11FC_4L#`E`H8}{{@6FH>l&8RsQAW|la$l+>wc3R`Q} zsuV-8);oiquZS|C$>jpUqn6g(&gw77MBEmwha&PVN`1Ln=){3oj_`ZZ#4nPpnV7(= zELxP*B%W_xzsW9(p)4SlisgccK=vY2ID(4|=~EJT2`z~MXoF0?@ou(fq+zwdL7(u% zx;?$8T{o5C2|45XZ&b?`$CKKwo4eIL!?G)z-SM)py%G+HbLWP7fTm{thNHpD!9wUF z-!|&-Kl@w65ctn3F!8|`bRA>(5h6d%M2^4_u&SI}@)O^a_T9~eEJM^Fl zm(xBlm7av7-{M8OP{Zy>0^R$;l{G;aT=2Du3v+`E2Z z#(mR)8RGMRTfw&dphoL2l$cY%WG<)eI$Om3@}*7Sj$%vH6}XkGsoo1)F9f%eqn^!3ofw6k|86NeJsh`>S`9VuDa35~$$#dHf9XmBV&Hr5 z;h*_9fJ;4W8sY&F4A5bHKJhsJu&Dkv^4@tlT1QfU*znO zJINkzqsm1MevN!5cE$(FSge}^46{NHRwuN{>Mfp*^v<~eGpaC~WKQ=SR_cOXCZ@yI!oKg z)g^UqrpIiUXntM$f}U{=Vm)qaX6{FOIMVbhcZwxWAJO#nuUDueEOvQhtoC`l^U$ua z4KLqCI-QWEoaK*rA^BAAN{?NynH`66L zV0GcU_X>CqZ}t55&iU2NIEu9M=CpYwLA}Yz4KWqBXcQ?Dfic^Yq!bv>DW-ki3|>&8 z*_6p4QFZf-f6Qjex^^YhMwf z6v8Z)$v&^c07lIW3w`Bn_EJ-2U?}(JhjyR8_o^$hqRk}l4FVN7hokz z$wag@l+rap*JQFtR*YPj4tEsCNYBZHtTrLH2z|RUMD7$9($3;E(&+zmRQks#_^Kn$ zKAm18z-3qdPfO9m-<^IxHt~-)2i1Og0tVGcTEW=JFiXTbxqSvZf#v87t0ocEUZKKEC@q}YHa%5tiXXjzb(USxKvw-T@a#HEw*0l2mX&c=kW=(M@Q^{smx5~$dkFW8WdGUyp2B)=jYRP0InlXmF9Dwxjg zqcWWRC-H|*Nl2Si;vl6cu3r7<{F|r4Y}j{~O|`_}TqfjC_t@EU27k=`*&CI*LTX4U&UZ%L3$zrM&Vh?wVG%!g6zcj3I6<1wEF`T}L= z1}4I$olo$9{}D4s@lFD_C3Ge>Kv9ohkFNM7p77)uC5~6eZiy(;9pBBExnHYveWVny8RJ6$EMj3X&dPw02m;f9&dP6 z!CgsZWiN(l3p}hXrd`@Jq%avYlxBMi#?jto=WPVXzhVN9bncNQd3CvV1+2%rjwGlZ*E2~$ ztY`B9&349s7|DkbSXh_*TUdh>vO?8K#4F&?^@p7Aq}jxX@10GnRCS*;$=pC_WLJ5V zI#EzsAowLHg%%nZ7+cmUsiJb3+$h->{g9rWrr$o}p23}rNTN;REcDfFXM}f^_)*B# ztf>vWHd2y0iQg@efLE=Y1*wQeE{$YBfqKZnSK8e7yk^1 zD)#C9T`XyZxMF9@QuAnj^`;UmA^X-9oRCRMv=TDllA^Kl z7A)awz{M;1n-QM=LUqfl0`5uy~s za;~-*vBa;smZ^=Xc~Ps5KpqB^giQZsCQLFq9mor0RDZbe4>RrRluDGnz?+v!E!d|W z2(+1P`f{Uv!?hIQt~6cOg9O7_u>yI*Mw`Yx*PKAejWnlK8@Dz$#*IUb64Uzzk+69e zbG3E)i7?av;*Cwf=Kj-WuIQ79-&U=N`wN#JUVI$3LRIvAF+kCvUwV)qNP+8rx@#Uj zGAIhq=#pgd>n9Q!p14B@9MOIUKaQ%kJP%6BxB&b`k3Sy$HJ|B!Y$X{Udl+*7;Z|)k z=dTJFalX&jD{nD*?+}4a{vqc1CUlxk>tnZ@#o4ys7vELsj*&tpEC#G{yg~Frlq-p@ zbm+L&SF?Nlb3ell3{tQNc41wkEW?I0v)?POA^5z7FU{|w zm$O&_J&TG>WufYyNr!i90D*g2QnE)0Ffpc1fpC+Hcg@X_8*!(`}m@!ZOT@6RJP_9xsG?GI#c)Q=#d+xN0YE zxmT?d(O>=9%%46?UMT1tK^W4i5$sQhNJb@kAHLvfR|vKS8sI`~sm%nT_@C;6g8q3| zg--N^`f38L>~q>lb3N93&^#azdtIzcCQ)E1C5O~ztcs`pX%j`CwL*$Nq%x9yhCQ3m zDnYT@;#f@4kAJ+}!oR_NpP=E;_JNPk7^`OJq%s+FBb6o(Jk45#|0N8%2c2tqA?!t* z2omqHD`*+jlP)C$$XMTwk2d9R1((NJn)l)XW^%gY0tk3K1$oeiuZMT1pNN0AkiL{%=n!ZXAo50ZSSlB2xEx5}>qXf6Q$W?;A&`VMdV#u&nua5Y1 zzl#ZPI3HR3Ln7R~JP?^PW$7^z%2LYk{Ol%sphQ;&2O4TBG-eGso|z3%ZCV);-49lIPphO=!M*1+y~fwly-vlu*VRq zGNa)TbM%;`r0ooxk;gk85EOSQB#T-Du$y(!aE7nzciS=Iq{p5?%NF9Fu@4f!^=N&v 
zNSi=&);=>R4p9Xa*k1+1I2K-k@q(-+;CN4ly%8_R#AVx2}dayLDW>- z@s7f{mVoZ}O*t!t0fB@pV56%b7yXh}&mybEn#)driRO=f{c#WbRH-^8zw5*zcB_35 zA4|lQ9%8-Nm!~IHj^@)HbjQ_Dh#)c}` zc-G@C{B@!@OJ)tu7X#RaK^ze1K}6L#$p+IVxF(#tZ&!(h#hr2xTi$)rpUYkRn^sjy ztIJ_qlFik3yy|cbi}tzokT^Z1R$$ej``>d;k4#Q%p4AR37>$HaF4}Yk9d=Og1$kT^ z^05Hk1#~NgeTqaeJ(IVeJPw9EM``tc3vKs2skd2a&*TXD;}XWFXBxu1v+A z(10LxvLZF_`@4tXtK9Fe^61R5rUZg9fXgX9EpQ9fw$4RHfYRoEI=tSjgUq#0P_cht z=XY!gKXNhwfK@1sjYOOlC!u^J$^P@$)|d8LBWCKm3q!-9edSLS+=DU`KaS*YE7}Xc z!1xpu2tK{n{o)GKxMhn>a6iS^I z;DSqsw0DqzK{u$jG-oyKw!B-|9&CeuvoOI5nZA_bZWOo!q2PKozcS5-2TuVvAPo0T z)3pqMzYRiQ8*K%0a2lSewDgtkZuBKaKo$!8w-ccMlH`&)!QeDZUa!+di>kjs@V~`t z(5X-IPpDR-Q!FV7-B7EFo$603>>XO)I4-a|o{U?h8fqUs9V8=rU(9ULhVS!L&U8PA zh?rmw>iRw;33WaF=Qiq#;kf27%Q39xr4aj8uH{$Rua_6IGktsSz$7w3k+a3ESgB() z$|Obdlo2w z>-G76*K5^Kbv}FpA=pAs68pT|h98qwpk;UHwiUj4T@w_=BbHDXFZ#o9nIjcf>{5t7 z;kFR&%Ej)OdOX@&SoyCBJ+f)a=Dn3J_dk>H2c1$K9bZ;a4`2(cIha~Osi;IzXs}ba zHWy7mep4}-$m>wK~2mzYCb;7y;f004A?|K%=VyOxOR+<0ErG)V6l} z`~%&_Ki*3_jdo6*6s!)cwD2cW)nCB1?YsP`5kcxl3#B=mXL2D!2(khMkElD{a!ly@ zk)YAB7x!4NEGW|YsXOaTcKGKKl%Vhfrjfx<9YqN&NQdxV-D@2K`cuD&G}Ff*cerOj z`2JK`VrBzF)`P-y2F_n6bi86Fs!Y(gG3}T$IS{gM9DsN6b9XyFkg2FF!`e_gjoLVG z2OPRk1Aj504<7WfNQeFSSfnCDe{+orcD*6Xg8kFglsbMpNVW%>>^Q~?Ao)bo}8kRcj;fT~<k_&u3R2+)a^&jlU|Ns~jHpn)Z9@o=j5UL`8-$Ul_7A_SbjeYOvp zUB?z$NqaMHoXQ~|= zlUUR7NLqhu_IK4u8fi-d)pmB!ZMwamxh6r z%YuD-t(2$%WUbG z#>kC70L2f;ITD6~C7?v}i)Lp-?+D^w7!<_8I|8k+$%9gruDXwq8Hy6Le@+zpa^mxb z4hBVaRW#v?Po~z0i;#-s9nuHvr~7MM$E}e=Llk5n#_|!^G8Phu(in=j3nlbpvG$nt z^oA^c0EmM9nOe<@78}^4 zVeir!l`VLv!9 z_{e~V7LNQSxD5xVD+~s5+FUt4sG_J2O6dUiZd8l^Rw9WE+`lYo*Bj?a366n8-DMW@ zX0RHn&{LXm{&z#5cCsE!kBL#Ywwf3(na~B0)=bA^nCdcewX6&uZuhP7GKNxFvO9Ep zIx3<&zm zq>6%wfNU=yGMB>E&K`ZZe1H+sk1)P?S@~PJ^Zcs-hb;;w z?C~KC>O~FIzoj>`Du-C^O4X!$s~EpL_L3ys2n6tjt9+sR8J6 z$p4JY-GjT*nW4?j*7;8whaShF@+p&`>}{!GS_^Qe+CKf?gfpR2L)wA(PP1*ZU89JV zQs2V%(PMH0#b3oQaPw9bm2gSSR8_t+k-Oj4vPDri2a(yQx2oIH$8xF6{9T;o0nDjU zhA+9-Sl7l=l)HBX3`$4|T!uqV86j`U(hw5X8Cn);f^knc7D`#~HcuQa1% z=%{5VeS~752B!wwEhB9=Hy-Dl0d%-aT=-=t)I$CN`%F;Qjl`>)pQ_nv*`8J52(<$O zJlryjQY7OEQv{t{-hUif=%ij^??_(1zmV`K$qBY^Oq6)k;A6K}&~L7Xa7h+~t(VxU z5?3ldJOMivy4*BK*`ZfCGm}4TK|si;%_D){c`_LGF-TyYzvWPZLelf`N*WCTj@ld- zyr$5S>|kO3dYtJ>0&6>~x;iA*kzr9TgylvqO&R=Mx;RA*?b@)B3<%qS@vETmYi^rP zLjpLrtbn^47+jjCL!AxW&V|#o*U%?2u{B4$f0BL3`iJ%grlLvs$^uu7B=E(H`tOSuXu9?51F|i{CI+M~s-U8anL0$8WH(e>ZCiA^P~V=pXa7(U z_HcUv3&IdDi3>Hay#N|u{F7$Q8m)@ri59>d(~gWt^dYF9^%W1;eO?zsKA4|vQ#2yR z&v}GS>!D>sufM}KZcaDxic=Nnk$MMK#{0uq2^mtOvciGphjC>9HuUAx)m#o~R7N{S zZH<>z`|+FhcOdry9&4jPPcVpf3uT$r*_kc=v?g)|$tDA&bSH$MtWmGU%m3kaaW5Ay z?1}7;T_hEqL666|vQ4Qm1t2--t>Tw(_4*7Y6F4!N)2TGjAH;>VCZcP>nkGr_ntmia z04A%xR(TBqmiQ>**EOr^y{JsDurK8Ve7TSw!NZ?`%l2Bw2Xi1#fngoQ1AZ}cw2zc> z=54JF$S-joXoljZfSZ(SqzJOvKbU`Tvi0t!(!O%=uI>3e9U5Shg$r$Lzg3<4ddBPN zAUU-3A=ExG`Ejn&haX?leGE1BGl$Uey}XrYYY>%G)cHk?WNIAhj^44@LRyN?HjVk8 zBI+_GBm5ms?GCv;pD@*zezgk9XoeAbV8cicE=s$lKi>{VhCjWqC;~FmGi+>nuHNR?>0S9E0ZLQWTS4)G=9j;I6Mf=9ejpUO zI~(T3U0Pt51~oGKQ9i{Oe3ZUImd0I-4uMI?tTY~7o?wA+H4+9_XBE43>9vVW!t-O9 zu)UwsYR|tAkZsiYh%YJrRzPjsX?J=4n#8rF$ne5j@;07N_s75lk4!HUaLCI4|BzYN zYPynwNx?+L#kN9N^^{7tvd;x_*3uC+w17onk9%vz*7m~boBHV8BV{P(UJ|Su6D~B`jB4_krp6W(!?h5_=ba>fFxwPn zQ5QyHgP!{7EK?I1KxYVu(zd*89rO5pR1M8FMU?KAW8A0ZRw0AO&F1L+4wP{F~L z;Q;h1_aWL500@wCA@kpXEV;||FAw;Zb#r@=)Q(e2%P`f*Nx~#&6m|U~1k|+p6>C;Z zpU+mvG#aVVbzH|e`ZfJ!?hg+>yKBz)HoS|4Bu(Uz)4GY{2u_IBVbil72xo^UftN+I zr=M!kO8?b_b5OaFn^~#cS?1W1ofHCpP4XE6FE-qkpGF|*I60mpiod>}NpWE-9QDO+ z#$SWk{*i(9!29a3-*1*Searwhh3}1~{_SFw!-0Os_BjEa=6iEL%53+>(qX`}{Sz03 zPy$4bekh47#S{rrkZXtFmTBkhkLx=K~1 
zC`FL}UW`xO-Do+Dx?V2QRzZ7JzG|WB$YI|3#>pT1|1GFl5e4HH{JVbIWvka1cZ|f~ zsBQ^OAiQKp;0n(xM904PdS+I^1mrA0e!uU)Oy{>#>iDZ(YE%mVijXJ?ww8?xxj|T zJ7CpQ4H}*%+L@ZePGYohsBHA_&mI|mN3G^JXAZyBxW}sllskw_sxQXgXgjo`N%+ED;gk=RjSLa08l9^h-_3UI)seML6aliCXEKx;#g=Hrp1SFj0Z z%VYso%kL6mrB3#vca}nn3SbWQA}bHQ=FkM$SQc)rPfgLc652TED<(hD_bVMFj@(3@ z-5Qp5#GwvB@7FtSu6{U;e8u@4mUMb@h#V4r(({)ky6GE2?pLzrk*V-(#IDu7m*1H( zlL){FS$T-JF3-P?Gd4}($@^L>W=^sMQY2DR^HPdp^P|M}cY?11u=I%248J2%293$g zvj0r=T|!-cadot8zTjcQxq!DIf^eRDeRKJ^3XR86;K986-%M+hmdc(Wdb~mUgC-p( zA-dzh%qLzf+#u_q))os~D4kMt?ZF`{rBM(1W>(fi?b!ew*zPwkAtqNn?>CHOHS^%_rZv~Wgqkd1it^hFwgK3J4f<11~nV38GIK$^decz zq{E}Moj^seKhl4YcWjhGj=f3!j0d~W9fM9-cFW&pw#oUfN|?XOW>QU;^H^TvRr0-D z)Jdkl@gF+*V9PrZ1Gx|0)&Kh8EW;JMa6?#xuhnzEF_KUa%acQd1r^T);{ad1}6vmYDfm$|^7e8AZCQ+$f#n1({c zc(pGFK6uPy7e#B=N6a_dC*N$BLd(-k8Ffc%M`1^4hyKMRmZMlJs~q=v z8Q1dnq?9K$vGMRNP3xcI_Cp`0O2TERsM@MhCx=2r4)m;-8@7EjL+m(>C3P=@m$)FN z+gI|X>Wj0QCoQNa7_0Hn%PK#Ok6jeypUjaSFx*M@#DYyu-9j;CWag#R#7@QOPZ8*L z7H3T|xZy<5casuS&r#JzH@epPbV#@I>oP9C#l3Tr^s``*vmu0?@hNWy`S@Nv%=O&$ ziYe-10r5v^w3P-^l+D%u3Cg;?(R)kBHK!^#g}5N}miFWp2aT1M5^R@sHcJ%=)5`Tq zcDn}lD-A1`dlS9(A}wn3{(KlfpxB{cP_*2rrCG#>u2~h`COnBq4n;WeSJom>i2DhZ zs6dHCkHl4gZRePrTN7dase*EKRtjklM@SQlh4X4U2K3UU!*cl`TY~Tv#srTg9D$C3 zeTM6WCek&9#Ovi(kWs4R9)TZNIRx|q#W}R?Rakb%4+b0r_(0DgCJFTyXqyLs9#Oxy zSO7|x69n)BC!?%RD48&i(YeFyVumuxr%GcgsF7qWlZYR8-BZ#X!T7?i$7Q6RW|_*3N4hu+_RA7KlU zIA5kF8{qyl&C4~ws#_yH)uCI7&wL)?-vTKN7u)x!R?M@^Qi7~akJ{g7QN;!r>>W4fvXB@;nE_}P6n5Y&2+s{}T$+I(O6_AZp4c$uIFoQWWkcR!wB^l+>j(Hq(`fhusZoSkxm9ymYP8uDWL zPXESv&wf2rQ}hr`&rkzXwKUwIbl6bcxyzCkaF#!(|BsmIe??)RPd>f$BUXbx>7&vv z21&iMNZ1M|QrVx|i``PaMTPoqlR!GExA|&FNb8Y#F&R!j@B5GDOJe2qq%gt6waZ)( z9Mkj8hn)+h*^qd$H36NSQI!9#Fk6PfT@dCM2v-yAu@aHjf6m^ zGT**eoSR0;CE$F+1-q{Hv4_3EVSq%t+ESTLb}$M;OgYJHd@qI*rM2^yydl$U>eo&z zA{`Q)v?sRB_r|_yA?I7vHo~oe($rNA=>@;dv^=0jv8g1siPY-%i7}9s1IumQmje%; zPEJv6jmd7W-_^278}?{)4BA7bAt(wEr?(*_gHhrsIHmg{N0|PkKK9ocScT`GDAS1P z3{hN~+-~9>U;P-KK6x*$_CA6Ji++8nijY30VmY$4Nq+U?zX0jbPPU_LS_$u*i^;QJ zm=M1a1MBYEV}fE>Sx967j7QH9FfnqNbPiq~O^L^Nn^i{+R!lWU_*{vp@toz}UW`e? zhA1O9?r6A9abm*#aFjr>fX5RKOcfu2AK}YgvSeVt;2cOQqKO>CO9;eK{0E8KQb-pj z=gY_R)dw6pUWW^%zkfB^v7{CI6P=K66mL`bSIJ7P(EgO6K?!}(DNY*h#i-{2j^c#WWm z8i-g7aJ%>4nC%fCny`R6_-kZ(u?hXSkCmZ+^a3HoTX5~D=fNDO?)l%@{b`u z(2|)T>KJ^Xn0ULf9h)^|;4vXco5q}Pl@Cur&35eHNph=%Bg$mBZ+FrCH6fLvN`xf? 
z!Zqt`MRlQTw_F3-`F=GT8;P4We>$5eg-HY2kce|~A^aR!awWj}vL#`!Hok}ua8QdU z;fsd<3RWGgPL|F4vs1VcTcvdV=#+ZPtNG?q*dz5br~QvMHCrHo&I}3BPwEpMnhjb4 zR!kpmTZhi|{%tNf`QcFEst(e-?16=AJT6_D__(TWGFWJ3~cPhca-{wfkifJ#K+Gedj&t*me^^;+yYHv~x zkPI+T!vPu6=O3qk07L6C@y`(I`h0PCtr{#s@#6v)|J!scDu_0vNi-YjTkeeJ+P$kX zp;0zs#FQ2M*HL@okiIiAfW}z zN&N)WjH!rk?@Co>5l?m>KerSm=s|*%F;sF<@YRHX<@l_?h1Y8#HGuTQ zPAhmvC0(7SVRx^}={BO;Y`IEz%{VE2`b7p{<7TKoHfBq4`u;}zh$FM;L2 zkinI(ylnnwM|N2<`!@Rx_c!1m;Y;^z0}syk&bE#Adw{bfWR;8p;5wzgO0h~wq^^IM zYKE_AH@wlq?_^q-ZCiB`UE>+FbWUW6bt@R_XOW@f9P(akdE;v~x3js_Tf|F&)!4^z zIa5}c+$$z^MfgzwDy)%$7Hm3}+ho^xd%OG9ax(q>M*0bE&7x!i8B>X4z&p~AG(m%R z3G9S*rXBgdp@Cw=`O$r7S1lY+>3obHt8Z|vUpu|u)c#V;gd?CqlhuqRb!JZ`pS;bl z9^1x_MC;fZJ;z90j=U{U5GLe>mVJ`(x-fOLMGN3(vU@zzYbcY5{_Ii6euN92{V(Ky z>6&&lwP09c=S&zqK(Y~e?N zAED6C6l>XDqw_pAcK2aO=S1^i)RQpcxPE-87o}_@*Ajf;BF8%8{1#}^jA(1^I1V|& zIZS(hPOk}^x&(gQzr-v>NQz-|F%6;S8(x2uP(J#}rZ~eE^}v_(2R-v14jzmlVMTEX z-47j`77en^sJmJN_P=JLde%YOmeQ@L#VA==K27b6Z~2X}en6$u@-A(1q^^aHSSxps zDWA0721E%z+q1K#I*(cLJ1QgL2ob1U<8(5H&MmgP$u?X|x!3rs2pW2E3O==97|@*J$6MeH(p4IH{_tI)GS`ilYe|pi|6U<>}<+bDyWm z?-}z_U*up81!UJMY5ENoA$D)Zu#yXbvKc66E}nzdza5i+$LNsRS58vIA&*VC{B8DY z=2z8KN!Q^gLX?_2WC)sw>&lk0_1i?yz8rsVxu(|IOpK(kE3y*P021=TR&d=&u&1`& z**+<4w#(t_DDp_AnA7#s2zgp=0V;CoA2N;BI&mm+qxzh9+a~&8Ks@+ZKiH-czR-t8cGQtPXwDo z!K4fRgM8+H80{uo@S*vQsR>3)%=tZ~0n;0`drjHW$`LFSSnio?SMg=*K6#x!#!F+C zj^iS6*lK*76KN1|2paK6y;;e+ZNeLtv^U?Om{nGU(Wd`onkA)K;xw$pO?_rS#K-AB zYr)ny1lW8apDoVc6)gd_}{ zg*l>S5EVKK)4#ec_F<_P8q-C~_pK0EZnx!P%Q~cXTsy=J1B=FLt1zJOP4FfGw|-Xy zWgY*sFqU{lrao!BT$jl1BeQ4(f$#}6?$4bJCS-S_J=s4r446vH-S@QZPCMLLlokxX zJ}+4F(L|lX11>>t2#z-cgb$#a->;FI?djnpMQ2isR4ghDOjyI-QG9I!Xq`n3+~8!>PC8>pA{3PPIYLhLUE=-yq1P}+HIDMdQUj1&g0BSNLLNQ< zPURj`as7U6F|f-d-D(*L+M*+Vc#6$V@sYEU2des7B`qNTl8DmVBLBXpC$a%xUNBg7 zG)~u{6-9;ZW;7?zoug3`?=Yp-%zQop)XZgc|C$+@Pwkw2mp(N(4sH0WArexpO(91Y z(2ELdpGgg#c1N{9q;&1XX5f|zODbR@x1eOnvgFK)NkC?zY@Tr%iupB?UX^WO2z;t zant|(q2;F_XDP8hq?AM>iUewp_m!kgQ>#x@+FbYF{7>PZ4Zh-bIMB>3Bsd!EH=Nwm z%(kjy#?r(keht-}%b~JUFZsLtqT)9xF)9Ili4>@i%vXsR-+d_d4TUbqUW#Qy?o4NF zazX5#ZLf&9tZ=2tLIo71XyB4-XHhBAXozMfF!EvSBtK(*WHFHA*KkWiHu zzN(bnR0V@URwlxHm3R4WX3Y8N5cT4a+Aj`UBA=Iquj%y!!^06n_T;s6AvoAXS~}eb zyOrPG#4yUYI8Cs^-aK`xed_6lN6& zNgal1e0@c5#&C zVjsXm4R|q7BvfNi`RuOlj(cIF8PIWpJ28=ekbfgZ5XGe>vn77b((YzJCjQa=`(Ap( z9rld~=(F)ho9{j{tL#_w-v;AN9{%hmF9l6Ar+wJ0&h-CGYGiTUE+8}gX#D&qD@ld>rnB1QUF68 zk%PbO+47m_#nS3~w(3`8m(#)Juj&Vpu4fYi&UsTf)lbSx&>Zl}pGs|;c-a~l{|L1K7?W~HRLA-#=P~k7*f4?=K z=mhq^YI;p$YVw1Qw!LV{SJXprcK-So1yW&M|5LnrchLgYivhcD#@M<>9&b6;SkAj{ z`;Qp#wA4{F1Ns!of@P65x?^cJ6TJsb4vJ&Y5Vl1{l=^Y9>@J8>2>ihoQhYf9@Uye+ zlV_cpLbQnh?%nMwL`R1J+^?8#E9D)qx#YNQJ?Ji%gq&H+ey?9sD*gH4;84N3L<#&O z-XC@9zNu@F4=9>lq*kwET^+)HRdL5fC;UR4%Og$@I~Ww&Z@evGn4sO7Qc>4Z zHtC28RBPp!trI@4L@qL4_r?FP!=gox{`R+qGDTmeU4_sadB0G{uau0q;VAM6&&fn+ z8zv7I7pJE`Rs#xD=bpmCTi)$HzLJSgDWUB!$bCr_wMX$VxAXYIDs%e9pm|K@$=_D~7O}oL({)AfLveDHZO%I-g}?a$a^?=wFQgme0cg{ z?gFy8LPk(KE5*VcvAa+k2|TL8LPC*&Q^rzR?4n|3h$DCW>EZc0hZHszj08j zx_z*{3PYHEAq19>k#o%a_}%PbM4vA3MS*c%>g!BC$ahjZk+aQvb(z8aD))N_Lb8ONHi6iO z^WZB6WOkrZj)Z+2jBL-hH3imId7CTY?#nj})9_g;&i58n%_4Y6{h04{yGX@sVM$DA zedLV>djSG@9U4`eL>AQ3NFj;ejzgGjknnuE6BKce~*KeYZ22BZrq8h!@h{ zo<9n)Jn-7UAEM^Nr@V7tbM;>MN2AWYnF_9q_(a?8k%(2Q_VmGOI#+U}G$l&YFlV|? 
zVU%5a7^|cCt#y4Ih!_$WOshT!Unu{uHIEd~(Ozobrh$-u{eUmyU=t#T ziTwC&LWiQN(%OFW$Hv_SII}1EDmzxw4?*uJ{K{VV)3f9JdYJgzuKH#o6IE33i_>bJ z#4j&;Y<+7OP)Zolm@{%vW`7J0q7~=xd(texijrlMCE3+eW+HX%jW|CRf2ECim0y2- zY!Y*~6u4V)?!|w9w$d2;z%OeRQNS2VW4h)j^3;!;s4_SW+onHAg(chBe%0GA?(bbL zoj8XZxq5f7YC+G-URK#hi2_}bcg*V#^ZBv=c~Zoh9*|`>VWsZ(I(*a~C_&nNp_U`) z5r>j~eK{8Wbu4j4E0xYx+!#ElfQ2`k%jRF5I`{(wR< zL*Qc!G@SN!g6U8Nf`@Pr0Ji(erI`8MR|C$>;_ItY%1dO92P>TU zZ(^Bi$;dUQ8O~XBG=-tNH($n^_nObQPhXi}CmbyUgVW{>YC7vfeT-MxwYY~q-YD8A zvSaqTm2mn{^}tk8-;XMetlbV`ZvNhmkLw!XVKfb|4eWt) zJo!z{G|dg6>43J=*<7{BdS!RDdfl_42BKSpF16#?``#i@Ms+sCjfv zxInF-hsV&)k4a})06U9!7C5rBEC4~M_~O}srlx0aL0Vsc3{X_!mFG?yD3&il1}4Bg z{M9+)Z92xrs=8do!V0dBb5-9k_gfa>IwNI3s{fIg-cJVqvElk3;vLUrAzu|Ti}Hgr z>?QBCpToh4{d7xZ2EqvKza;)GDJcK-Xv&L?nR73cZ7*Eaa87rBzxqQ^xAK?5p7+ns znd1?Y?-Il`S<###hRK>A$Bj57E(Txnn|* z2ML3zLpiYpysAuog#e-Med}9ZjTOy0`fqv2!Q=y4I<jnYBNERGiw}H`0DbR#t#4wcAO4G*+tooI=bkBScLlqlo#6 zidfE-e9kzV2mS-5dOI2YC&dNiwR9eOFVl zQiJ>Q9W>1gg{2Q=9-ISJl|J6Ch$$#z9@aZ!>3Q7p9 zc0vY-*6=9x9E@idU%NtFm|>l=y}&=TKGDx=y3yN<4BLhikwe@~ezGH^PF+9kR=q1d z=XLDjDk7RopW;LX`IyG}Z1&MvcJ;Mjuk)G#e_aKTgPKuki`q5ypv(EUjXH-9rzAs!tt39%aKQ=BaDXo-o;Rhx2qFH>HYx<*oT?rzBb{z= zaS&wK+wABbeo^<7gq<4}Vimz56s4zS{^0XCJJ4?>|N5*b!7M50Tdj9FDZH6%FFDDG zInGnp<(o8qkS@WnWi-3yj%J`LL_Bw)uPG-POAwL!!5K5{m_fmppif|TYmOV_<=eF# zjwd=CP}Ljr`4mFZ4tbD7jo@!n{ z7+!}zUO1W5IV)2nANTzKvl4Kgw&~aJB`=J-h;0yy%@i z+MkX9IhzoJZ8?`;y*@_+NL22HxnH<`cL!0PBl8~C9fX~cHf};?1W0gBS|{Q}AtcFF zkvT(5$$(^`AQ61HuMhN+cl<>%COlHDs6q6;Oogf0I#iziPynqMD1E6tFrZ!oJAfxV z!NX^5b@RX?4r@F6$gvpnOm=k#gPgvM2&*A*L&J>9r%6%=2D8)`-+98*d&?EF-QpTN zR|5+CM;+y5w-|Tzm~H~U1h;1Y_%(2WGeq#Bf~!72Olsyu&v!<#2tJd8XZW*+g@wz+ z=>DNzj9r^+&ad^Oiud*+v!ptGWPS$v`eAEg>XqSO!_{06Oa@QMfz>3}?<`}Tp?!>I zIQlio;%f@1(jpu_s>`L8+G zd#Mj0%(W#NdtJ4eh~TJT<4oP&d!+dWK<)BY!`bgK8{^}M)Q4l!*a_L4=oyaXB0UZJ zz^i^12cPtP_6D>bO(U_62+7z4(^POJjhdmp0dazWnjd2#{e?h zMT)T^F3K9=7P@Mdc!Z|0w=+HKtv}!D?;CHm^?roX*1Ky~B6cg|vNDxX@G&eqrT^Y8 z$5-8BBM_x$$MZBa&$fEp(mf0RwR+lqo5$2WhI@Pbl)^vd+{np(ZjYgmJM_*^i-TnP zAjI5sWH%6gE1C02{UV_KeOQ<36~z-}r@W3yDWPU@;;7z*i;th1R{1USf*3FbJNDCD z?f>eQi;mk8S*sxpz|1$aW;DNs&LinKBrSJBRX|37wG#>n=a-cPX5neUEv!VvT&2$i z1Rv3G;xO2lS8pKM;n2>~$+G%bX-OpD)#>W0p4F+g{B8`7sLrvRwGAeHfiWT?g4o{* z4tbmqYs_&R_jc(XKdbFfyY7~&WM3LJZ}eO}36vv4kRRofC1B6 zEAxAp$h7qlzhPM0-C_ZlSbDZQUrD-feCNMjHXp8OA>Wrmm?&=(sW_pA&dK3P%MqTE z>vX?N(38HPb z_Z47kf17+(co*Rz#0cuk4!qvy)=JM@KWV=$4D%r@C;?4nzje{@8^nn*wW27*jZLyh z5c#&dO6zDlKJNmZK?27hTox>wxQH!-ftLW08qsCfQ6GQ1;ub8gh?JX((~3tq1?XxUxa2r%jaE^;zwiaLB>x$>mO-qioawRvyzJ*{KfjPBcw z=^J5X`%zTm*C+!0{09Y|3dW~4(?5=;?S~a>6P;AtX-70hH-jy{TCA6;tsB4ZeIM$R zIRt6f+CZLJj5e~?iSASRCZ-oo3p47*>49ecRjsd==NmkyH(Vx07cyNF8X;gILQf`) z0d3`RvO}rUJiVVVXvWdVt%Uv3KAdP*N@?~E`bKbNwm50!xGzdLP&WNs#ETPqIKSth za=vSsgiE_ru&#dL^rNKo_1S1e8AJ)hoVWfNyhvs%az4z?{Y#3XWJyRtjPY|GA65Mx z!9Qkgx-(D5pC7ms&C}J@9ehM|ZLT(`@T+1PLw!3vX9J9D0$**;A9YqOv1+_V)|XW&~Xe+)XYv6Fd>+}=nq0g77C zAU$o6cJOcN1`a};C6(It0Q&sEX-4)!7iNWS=Ojt~cWM<<-mEA4GRe>+rfeGpqla_B z8fKI1AewN`5-e8u+*F&*cE`?H>8Ni1z{cfSONo9o9KnRb~B4e zk&eNej0tbs?o8PD^lDrlo`f3y^xc$7P589l;cxS#Z>lQL7GEzExG!iRnX zcaxfdfjV6Kx9S++7~rCO%s{=rB5bWcx&9GTqIJe^-~{)x3`^+t(SY1t%lb-Y8%4E+ z$7974t#wM3xSoRv1Xcz(;#zgXhjDk3bhlBgF;0|QmM49;Jx!Iuti4W;W|HYUUJhL| zd)Ua0-7C(1W&8D547T3YVS6J4978Y{A!9SzQ{x# zQ+5hGJA&6`cP%J4B*b>C(-?3<a z_po|d5j$o$KrRmbQOfeG6{zt;=^?_HSB_$C$;7(?%>mm}jV$`%L_aa*GYo5&{>sa+n_9Wh&Mf!o%946m%n%{xdeKcr{Q9`_HZR%><^88}yHi`1eO{8pepock zZ}+qrISB5yJ`EhQdb=pt-gv(&&|K{s+~v) z?5c5J27v$Lq%SI5HHbG)=UKB&pOq3F`3Mn-!T$M3g(2CUI6)%#|Y^N5y0%C+L7so~_mAa@TY)4c(3Cc>?-tdSdN31QPqUC&;q|$ z{ZiFazJNK`tFc}~#eOh$_j5kasA 
zSuruDKE`)>>Q))#Ap_xk-77jnuES@BlWQfELJn!Mk!ei!glr3Ued9}_(pn`&*> zn0kkCByZyPb-`-gx5u+73oi>9iN5J=%xyCRt-!GBW@O=p-ZcCpyr$&CAAOdxh4}z_ zt%w>vY=R9sw(wdKDu=0E<+N#hflz;O&z#@rQjMxeiF*tv&TA+<1s{8yK zRv9T2lcrhP0-B(KWI4)prmrt)qOUmqPomgs6xpB)hn#i)<@yofz3cfu>BMcGaQnNkh^BGC8{sD7uj@o+W2OW{F6LH2tN>(nxi!TWfjpTSScF5V?dV1ObL_Y9W2&L^$kEBGl& zDix`hFQ284>=J|C?fS@sor>M9^==lvk2_JCl3cG;M#39l^U@*H30!zWP5Upm)q{;!mhZrpo&Jw%d;?8`aS2 zWvU+kw~a0f`e69YO33`*O1#4G23IE%y^n;~Bc8}e2EZ)W*~mnlja+Rpk;_HZ(x1SA|O=2 zlIc_PU(QpK&`2wkN*zp&wxb|@Z{tYh{{~(UwVnnyj1P%#|B;)7ESWiSMIfvw(fe0# za7BK@Z2FPWlWc2%Wezmq$B7iR=`-J~xLZ5O>Yt=#TKA0a3L&&zqI;f8yrk9|F;|2T z0~&W6b~cQ-zG-IkOK#2VJ{;#Z@2WSwIi{5EFH7u<8FW-=fDbB59M1@pwl#9&2t;Mw zH;e4iHI$fI&nu^&2%4_Dq*7_iO5c*Rs64-{Z>8mbFB3N*^UrJyC0=o~N!g_G_y@Pi zB>v>+vOuu5Ly#C|e^-_xXH_d#n1b_*j-m=y#rcUv?VW}iCKxkCdH7l{kxoKi?F5X! z3jkN=zB-gUTf&7W?#8(V*~+9)wfyatmk{7@!9RXTV3T6THp%x~i@#^h<`crlgAXi&|l>dt58MUz4wsFD5}uK;U3_2xd+>rmm5*{)fki z0Isle4x;wv+gIhD4D7iOB&s=KTZf%H!Jl_)Uf4lY@W?aho!Y*kY-gfX`@Zc5osBMo zNXJ$K-myYVvD{jcD#!ci$}D)+9R?%gK5ZK$u1a)8w+l_5IF58?TJcQ`dDzeFEnsi| z;{TOzCta9N0+X>fP3k%Ao`Y@TU(eT7tgg7f6x!bk8r6c}ri$m@HSop9Ty1RM#wvx> z=326M@rHNfTjheXkpZyK_r>I)u6n-ChZ*Yd8R~k_8UL_+==0r@*lzIico|i5F3?}L zzU%%dy7aR5kGiI)Y~4%*?25uAeCM8Obb~H4uu)B%oLbk_;+*oj7r%3wvOVss*qJ2B?+!Wle8->b?IVcFRFghjN(IUO5u1dg7s_FKK!eczkAh~<}XoPA^%Tmb)QQiK8wv5?Idrm zG#Wsdbxh!(hgT~~p8uLl;1EZ+p*oENA3-An&w${n48A(k+qq#9=!oE(e=ooGAOh1i zw%wS&JKg-jzk*^|#fk7d-vm$;L@9X1zdZHEb-jmvC>NeX4c6uq7fSlvXxP{}HmQx0 zZ-4i@&R&E&rqx^qYBhizm|w zBCaPeNjE6(Bzv|%y6$785>pgGCi=)be zyfo;=FJDwOerrqFg5$p48lFa9dbq&vs=^AAuloObQQz>GdR`$C_VA`#r}99A%BoMF zLQ16k>EW!wp!p?{1jf8`{LAj#Fq`gM*-vre`w?>^&;3VU5ZmzfC#UT0eU{VwRG#p|~G#yJ^@zxsiKIZl?p0uUooEGHMVyJ&3 zm_6G$(B_&sQg=a!V5nA861VB_ zp!`v(Y&tB2`9|Y{5+7SSneAZyh^?aDqcxwZ8x@rtPV0pqHHmME$N zba!Mx2q)C*k7FHGoNC4%53LFl#vRM$=0EMb*a^LXn@APl?(3E4!{ZB_r^o_)o&xT? 
z9qnHIl?u`){KOrPmks$hia6oTX0D4Hl*e-HxCu_byQ>ldCBz~FPRPOcY@1&jE#m|f zANDbc&Jx*8T{zC8@AIX;D%j2Yzrw6zf?2QtE2Ym3)n?VxDd3pL=;}`>w`>p|e2qE< z1l6Jii)MoE08ICfbmV zB3l(NreFCNat0i(WeSk&R325X!sOxJ21XAJH7X2()?$vU7`yVCe&qjAuh8F#Uq6o9 zp2`a9kfeP4L^$k8OD1utph$xh%*!LoTWCSesDwu&nGu324_u>|FePt#_y_8=CXf1@ z<+I6MqsNGsuaZV>zxN^eTatS*T-@m*u`9<;umM#f%}f4V)b!&{vH#-;Rqv)eU@cQE zN%~rmo={SI9AQ9ZnX@>S8c7!DkiH#xcNXz2e#^JvAvmznD|if28K`Yv{UviAQ(<93 z;4bBS{cUwx?P}CEJ{nguZYb13KJ?hGxluKL46^Ln)zv5E_TZv+PXj(7*QT4|J$7;+ z82Z4lg9Z>*Yi&jUcKIn>fk0XGmy;K~74l)=D7CbD7Al^R`w>Io1ju_*M@_HO2(wAG zRp_L4gkv*5wKv>FvoN0SInDSDkov(8kS1B{@aps+eE$OC9A_R#pZ+}&e=6Gb`cbZ~ zfr8>CKH1_-^kXmgX0B87v-+W?&`RY65>+Qr(fv48kW5EVt@OLEeSBB68B@%U$sKnn zA>&E2(fQ7iyn)*N0Kz#tBMKA!RsZ^hmORRhLTx;z$;WxdTO5@QCM&2!)~>JsSA!cVc+oRh&Y?7th`o-H>2xF##2y z?e0cb`~)1`RewL9eWm!p_i9*(^Z1#c>-W`g$`hhC`C$SxwL)IDG?+W$wtcA!HufSy z{80TSlK$&>g>6wI`pKTKmXFw#OF2jL0#<(0X=H{wD8NRiJO$c#k(UhxiGv~4?%WP5 zps`wHe%Nd3EA=WZ6@8(I%HOI>W;CtdKk}{N!wZnXU!IWj!a;(sQtM|Fh6wnRWG%Tz ze1)g53rei>OQFRniGYa@9@GGTA9eFTNhMsl2F-3Jy0`?MJuF@f6B$Z$RNwZfxA_-f z;|<1)Pi_pU+B~{OL4mf~jTv>+>(rKFN8%4PbwD4bzl?|N&vb6 z1NY|dkDfaZ*LbumPk@JcX1e`<`&ToxldT-Sd^y@VwE4HPygHhS^fD%IdEnPRaSJh* z47rMKNYnT(cJ?YKu4DgSrciM9S(%Qb3P;!Bmcz1f({1{=XHuNws36J!Z_17t8}h+@ zYF9Mg_Ytd1i;jpwST+kQ&B9t-H5xtIVu_H*Zyuj#K>kvy6b`=6@j@`I%#c%E_nlI1 z`@xpk`Qw&u4;;!VRXoil!ZUE`%D!pdELTVax*aeFEu`M`D7^`EVUF*x^%|Z6hU8if zLR~o>vwey8+ls&81fEqfgRcta!jD(@oNHBvzqG5>k>O4GfW$vZDmg(A|IUURei?AR zym~W*9l_cxB}cAAf*ic-{~cp_*UP&M1q7GC0ox$9%iPgel7^@ zK(9oLwu>DOPW2{N1RDhIy;9$>5G9n4j^vHweHAMbnoA7~B{zxhTlBP;h`6DiF`1gB zB~0o(qtXq1qAY0whj*NYzVHh5+QApZOhpzSf?CiN`V&9^3b_oaYNV$(PlD&rl0RX4lxz=o()j@bymYqBUG0c!1^QfO1QW`u7U(OsiDmx0p z4&+P(GgWlz2iMIcf~av_7tNc*!*C1Y%g^HBR7@CzH-9~FhSiNzk`58b$d-!JcwKEM zH-*PZ5UxmDtXhN|g;vDdPdR!0kOu;4p3! zYRlC_D=Omjw3Q8<+t@1co&MY#sADE+hWGdvBI4HRA!)n}3g6J8u;e#U&U=qVSK?4^ zU6VN`gHz~8Wjn9YYkVgbeYjEm)1C0EkX)O#ae_}eV6gb(62>@^mMpyp3n{as(^s>L z;-+OA3W=zELzKmW?}{zhfX{ocD$^x_piCpkhJOx=)+IE%W+#1)=dfJht$OIY<6*<*HLdA2$Jsr5~}znfxU5gdy+X!6c-4sj{cWTKPUzG;yTxTsheV z^i)5ky@I_k8-gZ}`Kr*sAU~}WyUnZJZMkrZ2_fD}{>RqoURD0}1x?H!J_+dJh9;-T zeO*DbQ>-1+u?)<-!>)m>1Fo`pUouT;i@uc*()^(@qxn3eCt2P7Tz_)%NjpC(ulVu< zx8-4PqS=$x4;`3j#z*i!430BY8v5N-Y4XjQni{3^d7qg3t8QjaKnM=*g@48#gQ@d1 zzD#rdJR8_ho=v?_iuI#)g3D#7ygVKpL0ByaE_QA0NLQ~CO!8&j00WH$s$*WN%_&&9 z)L0kt#rI^afyW!HDN(ZZV!AE^pz0a-M~YStgwF(w;p6i!C|jq_xXm(57+Np{B0Hd1z|t@IlVrMH*2PK zPcP|_tvOwNTcwz}3d(FbFKA*c+Om|9_%(=Uh_rz!3*d`YB>59axnP9_5rPvwy#YI% zZtq)Oz2c6yot&J<3wvuRULH2BIBGW^r2-4#s3nJ&NM7JB7}WLArA%hddxX`GrTGNJ z6k?)HHjRF%w9$v#K;C~jah3~xGCgRExoUpf^bWtkz7cWm!fb!- zzBU(cVwsx}nT#XRXFJWkJu3+eR%OxabS}E^WSos+0zaZI>CsXXxa%NDiK3SFw2Qh2 z1*OazIjc5byq0ZQvC2ZFU9)F&ynZgdT#(85mAuJdqP>nc(V69akv@t`l=!>hzBBDa zl3e{75k!E>qWuw()Me!SX>=OtgXu)u)RlP7m;C*gXIUM6J{VmqUH5+)E;Nz!WX_}{ zOQL_=CYU-Ha7cu%R}2>)h?s8SKU3jAGcsvDBQgOp5pubf z>?3)-zP!s{p%cNnn5w z3CS~d1Ibschoc4ss2O$8tL(AQJZt+1%eWz))7M$6Tml?is7_g4-*&WJba0Qs?vovj zsziM?--H-*HiY>*m^P&WJ_Vpzil!#MpXIEUngd$$S)5~bvFeh7D5` z+ddRe5VqhMY#_Zd{MT)(7T3khKHDP2#PaY+%yl0l;bwl}yJBZ#oy^n4ZF_@bIzvVL5tB4tA zF-Do1{2pJqx9rIqEi)~Fa5oy1zSc%Ac-l*Y z9NwomYkp6sjn7Vf2=GBRxlBjP+wYuxir(#vz-uWJK3wak2)IY!;}8JuLDv_f8VEhS zq1=Lh9)xn=nX@YVW@D9*M=(bFx$MY%tb82}+3pQQp}?7Dn-VMYtr@Ws4o6|MW20WA z0$A-w!S}uv(WbNUcM&_EO*<8*hX-6xi0v;AB~AeeVk*YuFYm{r2eNQKQ`vmlfiR#K z*`0y6CD9-h1OkdR4yOxcy`u4}l<{cPeMW}5rtX%_cagWgNqn1kcaYG3@OVMPbnE;H zT3i6y>jvy%%*=OLZ<}Q`4)#EkCW(Y~*E?fh9jY_zqf+cdz0fVSYo2XYY7Ttdwz|pwo|0olDPvBYKec3#10Pm&ZhjGZgC7 zX7C{PxIky#6?_{BiQH4JI2W&l(FiAa>sixli*rT2?cDA;ph1ndC$fre@&fw2(^N)G|T z{i{SAZAViAs2_8=18T6rCQd&+HdiV4gDtXBi3NoY7JR>UE8`X!NR9-a52aCs&t|6G 
zveMRn%j|f3INE3scOV&(3123w1T*%e^>S1;=IL1j zdhETe`Sm?XHx0)V5F@QCl@VyO=jWW!kESXUke^HoW+@$uRCL7-pksYBazn3)8Aql) zLj7gHOHt{09&|oP7azhzw6-f7Iy9g`m~kw3 zhO|65?%#stI>px8^qv^`?V^3vng#s$TZDG}dwg(x(~7J_N*|5}atWO#9?pye>YUG# z8JFXQ=*4~EOl{tutM=&zaT4l3PG4(??N=!o#jadbRIM}l5k=a*`S_e^am?&7Q5%V` zwQ^JDxaO2svQpGLe$PTz_p+kd?3fWn*NF;4Q5f>VB~jGpu5v5Psx+Z85j_a`hF zU5895rMvrQe35=s^;7~IsAma%4e*}!kQzf%V(Qv1tIAA?hRySh|H~J6|5WBT8ply5QOd?WZmPLS_&m^Skq9s96KgeD=CM`zIi&5$_C-Hsd)hOO&+Dy6gJM0D% zmxKm;J1E;mdx5DmRt?CH86aaZdbL`YrvAQZ&CRy??JqD2<%ybUuGN3lx9NO*>Q>4S z6*Kt5_`A@-SoN4go`jDFL*o0?n*0l?!pC)i*!vN_x3f%}6?4x^W{qK%R9)nj&<4-o zz$*qR-OTw(Whw zjQaEiI}F>~(MhM~Y06m;g}TjtMW)W~G0{qCcEcRv4eWTnyT-&}PiI|K6TAv7yE8O4 z?;}dI5d;`nYSj&(3pctX8oZx%gnah`mWy2wWS_J)J3%^%67|0x?){=&fdz6NJGmH+ zqTq=mQs#^Ws>4KTrI(?!%&@l~>Cj`he0DW#nT zp4{-!)xDnvCP1uxuu3dk*Jk;#dc@P!RxJ}9WLmMt!t~|Ka#GLcGi|qsp^HcJZk}zg zwH1hHBHfmH6~C$~i!vvC_x7heMY2!VNjyOg%tn>39*cG`!|K9?GpqZr7S@tGWQ1pi zb$83}Jucf9|50Qu@V#4S61f`wr%*`NuFKza*03ZJSJ4BG7{(UU=QHb;E(EI1rVXMX zk=uJ*C*5Zk8c3|LH7m4e6cdfxt@@wbU>_kRH~hY>EL71T&~k^|uVr}||15jCH*{~P z*iLWTE7L9w?uuLsM%c>m9xEy~CFdA#V>=OH@Hap%$+jTR@0 zACdt4lSH4)_3wT5^YkBOwkjC?AFH$i$x7?SE(!#S-S$%egCT&zq#Xa%Y-}50h0u{R zA@GFDx@1xA0#PlsYiD))544+}9QS)?frl9UGFD`S=GijcaN^VkcVZf8(X*3Y3rhzC zrr?h z_Yj^5p#mi`1woDb$@M8R0o~;{x3&`A0Fk3C>Y{G9tJ4UPkBN^yk7nm*VJ$U!UtvP5 zhx6(PMM1-D8joU4ZgGGRsyCqBnF~bQIXhB+7ko)U-Ey zTK`G>>Nd#1O(C5TPKNTp_orc+r6PP=h42se7a7eA851Tcg2oR@CD{l!A-5hms{X^2~MfW=8EU_Ua?;YQlfsMg&|b?s14!-c;B}TGU9=i(sN(id8ovs?zV%&Dj5~ zZ6Ls>=qWu*eIo@o_n*av(qfrj8muj@&)?xZ8|kPrvhRb5ie7A?Bev?TnfKke*lpnyyXJe)by(S% zxVX^Dn&+C;gHQmD)c3S$Nq!bt<{XeXhsgbrxYx3+81MdE^_!-*Ty3t=zzdJ>pt zDLs>R^`{x0iVP1JY1C6n zC3cFy@4onoCXBGLnr2!#SR`0fOIw=WxaPT41|AiI#Db6?B`J|k zCPAR9JN~iwxLivXF@mfSl&)OVeFu~m2{?;t1DJF$*nxTNf3<81^DcGOkKuZ6b`10` zVK)grr`3Z3edC^OU2tSvl|v=`aW+qMEb4SE^ZTgs2p)v3qjfD=70O-J_ zPJo#f_+{eHXe!fd|K;oao_rG!eC6wUkE>+@dsH>~V{-$XaK)Ejnap>7F?|_=-L_`Y z1MB=Er|S9u^86dIcAJCHtrtX-Om16V;~)Efvh1fh+i1Sg=yBj_kx?-3`^$-!xO+n# zeCF!~$U(kjBkE7ETQ)NPw*qA06r4z2uGI~2vW)~B_vf+~)kl3S9fki3zlgnbVusY8 z5nSLN;>K!vE{jaE4M+qEtY;M#MWAb@G|i1gX%6hCiLJDC0Fdn)c;B-Rsz3|zCb4iH zv@>;ka!quMWMUy;$-ha+UGxx3ilDqdf10}Z0ucf&ysrAVCrPyuL07$k^ho;&qW9OL zn_DBCC&sTUnd(t-9K&dTxoy>| zjw4;#L0Ep7zF)p^LwG>jKBPN;qhCt21ttcG)SOsx()U(9)<13DkC8*lNa)!`wG@QzN+> z{&eBY&#u^QFZV;yU8?WD95)!><^2RC(1sQR@u+&U*u42wDDlUY_-}k$ZkVs}ztHeL zKLjr4ukA=%mH1b|a2<(3u%C#ps;SdU1b z#-)y;gRurqNJQBzUV&?aJmut0jV9}!{0Cb5O18~1bvp0v6VObT4@lMd`Zea)^anNxnOL@_{!87x|DU6eZ~A_!H+}kqaOFQutG` z=^7Uh?mLN9>JHhlIDeIn4|4H}9alFf!EKc7pHAx5*EAmzz@s8hm&u!;E4dqQ;|1|P zb=vsCNevYlq;WXm{+I3RxhWB31XrQUaW3gVZ}Lea7_+Y7GG-*m@~XO7P!Mr--xTcr zUb|Rqy;z47qXd<(?VEymAAwf=?MY?e^tk7IQ66ajp2mFmkb2b|+;xgGudSgkJeKu( zyMND7+i}^U650()^AXa}q~1Xy^0~#jJ-hL{QcE#mzQN;8C?mqzicV`Mo@bAQA2IQm zZP{$#H#xo+D^!Dpvn(uDnV^%;zRSNUZ*K7JO8r`62jK&xH=1Z`3f}LqSpvit<_q#2uWQ?{sH#YT<1z=U6eox;XXOW z+T-~zfsG27goJ@Q^2HDNLA~A0RIlZ`tb90sAcnCHDCIQ_%vsLCSv#aitH`*P(s7cZSMIPyZWH~2E$ zgs@W+G0g|(CCnP&?u#$}MflvIE1GFN+{pfEA=vWzR;fM|!|*J`1E8lno4^5pLW)sQ z@8L^#{HySsl!VN+JB7$)PqV`+=6oXzUr1jdCOBBIq)5h!;6$L=)8%m|R-;9ko&~Lx z52sT|5)I+F4)f=X;j)q9MqJ_D$J|{DCpX}TU;3Hg(;=~|r6PaxX&!lxtHtuM@2LKb z)IxpBs<}L(iOc-q1(~8u?r{lzV2Y4Fvs8w?6_-YDF}){p^nPkg{J1&H3nj6;uYY_` z7s%}9f|7{E@HwDBL=R>ip*$9@uPlJAEg&CKnfS&6ciTdKgTO#p6zFbVNvWWKL{Bl| z(|U*aiOR`H`RB3oM&bj@uYBatkE_WIo`cUi#AugiL^iCuPfV6tLIror%h7A7C{$i% zpwzu!ce;i?sNdZ(m$Qj?Q#>M~e8nIyo{Zz@jA#M=Oo5s6yh_P?P&<9BmBL2+n1;ZiVNSR3Ii_0QK4Eo(#cBmd~+piJ6H!E^8 zW6`q6B*1ySTg*kfNQ^R$^FEJr*qM~?>)ex2%V@k94Z7ft=Xb@*`=>$G6EOO2>~e<8 z`mLLK+MaC15xjd2V7O7w)Bx;B3mhUaY%`c?V 
z#`{;}D4MoI{vVpYGAhdc>sq=yq#G#-C5P^oR-^?~q?@6;L0UlRlJ0IP=}ze$dVm3j z=DoO||ND(Ev(~Wqan3$x@3Z&QX6A#b@!`iGY`8&PV%qkYB1cs+(Ia=GceHnnimX%~ zL-eMwWH+=QNFk~5n>oJ0tgS65R;*OuRr<4QdPcW}Exe7BpSt~B9)6r8AftvHk+7{S zEp)>CwPiv|k>uHPZmEMXfLi{JnrN&zFvm)qsc@w^ zN7CcVtg|g7L%6$nUq^-kjpU`P;`1Q)xT4S_g0*HT@-?%O7?h-XHhiz{0rQ_ZJm1%pdVzl1;|%)(7ft_O%A%9coE-vZD)Qh zg8>qC6jkx5-ffv@Th|-yaud1R#ZN8B5ho5)1cb)k@ShH&m%0iO5*BfZLwY+$3%&;x zImVJFAKefOYe)x4CyD#m3-M4DYf?Ulzt5P*naL(oBN%~I2y;ZYLQceEwtbu{u)=7c z;0{n9pIN-*9N__LLMA$TR@$L`F{^+MYVp6KJnVD6isjUxc`DvZmYL0g6n+hpRdq4^ zJEoAWQ(oNgU}?SJguJU6FEIRP@+YGZe7W`6Hlc65tMNKYFj~)zldAbuuge7O7DGCs zF>2no%kYUN0mN=7JT0F&|C{&@qLVylsJ9+|bLPe4gU@ezw>aaN;mXG|uHzYHXF4X({U@!A6ZicgciNjYnNh296EQEyS6f|Yp zKEuG++K!{VoR_u8MqfjhaFAAwMY*Zm7arqNIUq*9J3AXSBJz#z)Ft`y1%9Q2hjQ=9 z4W$nTUYfYNh0ogkr@5HMhrx+ljf(wY9p(?N6Q>=L*Pdbdxa8=_oE5|1h%u^xT75Cb zkZdm!?^xrhR_;}E4^Bqe9#ab8f43o~%RXSasSTE;2O(c@rdG8R?9Q|gO(ic^ zIzS0_zLpWE6J>>VBSFAh{9aHR*%CH!)M16ySx`u-{#4AJ88(&!(fY3*{-31dyX%)l z88-t(4ylU=fV+}S)^N^Ysj2ZgDlyey#F4HpZj1YXL%B2mp~aHylNiro^2f_xfqyK5 zD1#V-*gp3fX;&N?iF04`Ls#az*YL!e-ice$xh#gT1Y^Xiqjx^^*xwfM{QVd!ysh<-GtG99wM=uJDA`dP9OZbq`YyJACy}F3d3l#R5}o04 zP-P{*nD=QZy_lUmj}d;XJ=m3DhlE_IgEs3&7|ey zgRz9?r5VE)|3GqYH8}H6FeRI%HnSREqCB`MvxpJkp$Fw(ucK!L15-G@x#w3@#Pev-KK*;8+vY>NDccH-oB zKIm>> z@EO(Yjl9$^$_3ARbC>eAj&6#nE6mFu)o#*{kqK@>C?WOlbSt~OB7(|ocQNu}!}*Ev z(UQPVsqS)1!aC+-)*!L2*2K3^Kz)8(o{^+)rNxmn?Dua`u=g{FI^Ng(kUAqA3Hn~{ zx3*P2I8k4D)o)6S_zxA}Vskn%+!&T-*YW25K(I5K3CZ=-;%{3cx4XU5Lz5?AD$k3q zSvc7>S8s%L($w22mqQh}gG~$|U^D(=9+A)P?Q?w5;4zj4H+gH9YFVX={x_{G=TQvJt7k4@9i?sjflO80H|@}1}bICmi>Iz5{2 zV`)~11oZq)ND=gy&Zr-hoH-ED73pR|b?s!-8SxtY#0Za!FA_bM-32*a?3<2;ahQJ| z%tqHssKlGgX_AyBlcdlYD&R~R&e?3hgza2Z6LCdFVj@lCSMj7K?&kbi_&48B=)8xX zO=b*g57tLZ7eMmhr=c46t|~X_7?{{Dz`Oh@{t>u5bzqVO98cyggwZ@^LM(#k*DS%Q zp@>Xl5EHyuQGC7G-3oz9%5NbDshrEFp02)v?sm7m4*l(o4e>9wXj=shMXpN3rzM&E z*7Dq9Ir*mf_EEwke!;$Ddt3BOh~&|ITOHemPJMM5mlVIy^(Hjb21E~59~HlA0Ey(9 zDxLoBF&xOjGtn@vBg@L4>R=nIen|)>3Y|(H@(&FUT5QxA-k$yQ_2w4$>7`i@ehmbA zJ(sgnJzb>n1Sil?t-gq{CG)dhzsP?eRuF9&0FH7U-V8Ti%YQ9f0L#>)gtKysF^Qfn ztgzq2O6%TUp|b!7i#%LEZ%ib==B^LrFQqGJVOy2r(1lZ}^g3w8X*x9FT93~JRDIiO zQ*`<<-5ym}DKGjB!{^RfSD`SSeY~|aIp%Efm%Y~2hlZDtYA8SAfs)7GB5USuF{7{N z1+U&8_B!}56Bj9?O|AG+>VQ#w#`RRKW<+ed|L@IeYZ&bv{38b&!R4d6mF#vOIV6Kn zz*h%P)&E$8G~)in-6x;Zm{ou`zn!sQ-&o|fHf&Oe|D;3vU!N`izdkGQEfFuhb3vtu zG9J?j;4Sj&urw)yh>1>w`xzsyYLw zc+`oq$;8Q_Zs(m`G!?xhacA4NqkkLsuyMS~a?dv(o<5||>oRFbTpan0gRj@F2uQ-w zAI7F@K(Zu1zr6#Eix+0@Jd&a)PqOqi-T(FWSV0l`2Ady0;cb@~=y|6p7G&N(lWj0F zaHbe^nPYYZtl)7_1 zW~uB!j%mn}guc()UoKBNgD(nLm8HFaQs*{uB{8h_} z>1IqNzcCdA!3h)Ays*g~`^55=<`CxX3mGQ0xNs$Q{(Q{JU1rG-bxXggLxw0KY_|C` zYpu3ou{rH+_~CJP2=n9w1q&UP5P508K_+Y?YS$GZDMVF9%YR#+KOQC6L6SB3{G2+& z{{M60O?|6a3L2j<8RkL4kI&k!in4Jic`)HJK+K^J)J(IWV@lYzNA^e1E!$n8sQR|? 
zV*oYot78|Zg?xhf$$3h5%jJ@=M!4khJVkGT+b@7O1G{^fszb1lSddzDsHW{DQ=jiW zz77Al>=a$pmvfHWoKUKkbe$~zDvQI^g&q$0DbQ8wVFpMEf&>dY1inE@nNBRyUG=ZK z@ci&`g?z^yX7eCd!eNbSO|POw4@bnQ5kryYdmJ);jd!k6VB`RwKmEIyVy06Ll&Wx1 z39++%hx6wCkj|sCu++%)#h?uED(L=U52<@Fg_;Cc!u-kMVfy8T@WkUhbFins4A+mG zXk!+k#`8RDSPKHB)pu-5%pQ=q`Y1K+)q;6!QIlj{kwG@kela8i-|e4k3Lt70cDyJG zVi^4NQ)fQ6*H-~L>2^$?O^6^!tk^!u4A==#vZl|Q1WZr>M-6O3l@-1n%6$2^#9w@{ zI)P0kt0k_OYiHF;r7wkb&^F8;Nzl+tt5iP58PGJi?>^jZ-W39UGtTYuA0+E9*PHMv zi?0%d0`|w6BHkDOLV2E68gpS@@R>t%F{G#Y%nT4Ksg)(Jf7=-#wg|eQ$v0mAl(tK~ zX%r{U628rQDRaJ`tS!h26aF*aNl)v;r2FGpom`c0XrUh3kvQ zHoRsu*$;XAtGh;i^7f4TH5=9Y@7NMm;1y>1dZt$AJBwjS{uO#$iN;fAJspbU_;(PM;Mn?o09K4JWfGIJpXmpfr!}r2MN#1!dKAQN996j1c&d z#n0V0WTKrHIoDpmg9s7zVov~`*eBWr?WjiA*WrD*7ST9B`g_#;TxQr2VMN3e{OQr8 z|HIFWiRpdQJvD<8=hNjWv=SIv_jEZ-MoJUdEz@Lp=n6rWYnKLo3}VsXuw;2U=|nmF zN_;DghKE7|O3ub9{35w1UF<(m<0gdMT@(BAuXGVmf!53MEaWyT1uBVVjwcFw*7k3e z;3aDcXQn?}Pda!-C7^@ihr8LI$$1~kK7ND6XDP9^s6o|aaI>7q$%Zc=Ez{aEzG ztJ4>$6Qy&9kh9tLudvdf^Z2`*bN4`$`uFiOYVh(K(KBU;g*%39Ej*eyT;CRvUw$Ea z$SeL_jee-MQjGbx8I{Lk*9M-*gFpvT8;ff#>wGn^0cx0+I)Btw7`z+zk}=qGt`I2@ z%;*sbZ5eAFyVHvQGF{A$OmhG3m_R8=*Dhu_%cX15k@CiCz)Ptl&)jvT=OtVXZK?4n zkif~{F zdm7KZ2$-X_U@+_zhv#=CN$Xp_P+GLFQd#sIh`56{9P;5+n`U@B6jh)!7b}($V+VEe zafb!^vlpjKQxhGMFDoHbbAJ8~ZN4cU-h#5tPa4NN>#8a2raPPv%zBb!oOZDcdqsIg zmQ*~rd=c$Ww?;wFa)-t=Ao$=w?NXNte1AnCy+#$e`YuFEz&Su&29!L`S4e` zf;H9;8^qbsV=EF%)Gn59o1OeW8<(S$bH#(!q54B|F%~JyCM&)GC@0O=0{kP8LrB9e zA7HeYL^OV7-yJ+02Z>C_C@Zfl3xtR@@NppbuF3&>q%(p+@577C*F6KDiSg@h+gr31P@4~Sht2va8g3F}yPNv$aavE>&0VfRtLgBav{fuxA7gbDR(fG>WvIt7wT1zz zp*huqQwuyc1{WRSGo#s(SHaxZw_VOzvx}d#?~ELw9zIrz?)Lw<%*=s>={(0bkqg+ZkjhQ{R&7V`6T@L3GJT8xira^0cVC5t6~e}<{3T#-;;l<| zpcj-&4_{3Jn*Y&IBL(G{9Hbm`^U}n=yn>f_FG^y$&7~Yc*OuJ#f|7V%``a@-?go#SnI>IRV9yH4RbBAW!}5Rg-d!i1~c3595FFg80-l1q87Cc8FrH0;^|vyiY#)!XO|3$#o=%v#6Vq5Te z`>Q@i?vvD>=68K>;~5lxBx!AmfdPvrvli2zTEIc51MXV%GGu&6{ZvWo~32=U*I8eAu$B; z2Gu!_OR$tJROPpDkx^OwlrN7{S0Ec`yp;IKres$_j)1C|RUebsiL)7pg6Tp&Q*7?o zHEQwqL2;PnI>S;Uh0f{=ow#{l!&XK^MDUwE;=Un9(<`s#sNp-Vd~o*JvBd25K=L+dZQQ>U+f;r>=)& zswHDk(k$O*<`E;2c7U#Df;bl0QU#pLwk|rg%)GdR*zcoZ>LgRsu6HEYM(k}w)9@c; z3lIi&GuT?NI>ew+=8SJFZj1oKm9&~<(}@)(=G}WnI>n=>Dww_J`xh9IE+u&J4g+j= z@`!=vLR2V0n|VE&?xL5gD`yl_@!0~MsQmA_+QFYyxuqJ~ME$IU;BC%kky0JY+l?&2 ziRvIwgUD#1fyqm3c;VLOb(8O7-67PB<%bn@_&Z{Ana-GOZH2@x#Ql~}K2JtQtKU}U zt*qVE+|?MYN&a2h$j=-^@I~#hYLAl5-h|6}rq$h5SNw&wudkPbv_LXSYH-=7t3%eku4IejENsmV_|Xb(Q~< zPPz_@cfDRMA)ozkasv{BeW8)3A=Ng*jSXm_OsaZdu{z>d1nF*Zvv0(nUsI~R77d4}Yk8Q6W?Xsy0#l0|G<&ZqGb_0}&Cf{F-s zIv@Imd${gS@W)y&lS+hEei!7$SdV`rQ?8OoEjeRFM7L=RFw=GL4)}TzwnY9p*5>RvB!g z^q^d=p}5j1B1JFF{(p^& zqLRiAyKGy;GRe~YYzm(liHCMbJ{mshd0FLB;9+VU(8VjdL&-A$)0l$0AHf2we{bau zGnKP^I(^Q)U^}oAQeQX?+6l#bCjJB&M->#dERVykRm+HPXOH(;v`?(jdSiHO~R1oSgjx1ac}ln1>5|k z+ttFp{nBFWD+iteg0`wMv4lCCBP7DjywSAofq08ha@eBt7m_(Ab{#lrhm)= z#*tf~W#XW&1UV3b>4QFjN-_O|Eqt!nyc^f+o}Pw-srL=!v0c9@k;%78j!uPVy!3g} zQd5up#`L-VZ?os zcxi9NvZp<)f1k=E_#tRy$-gqV)s|z z$Z(3jaEQLRSl8WnLmTW|2fH8XCUahCh`ybXq*--aPD|Du;0qyDhW$Od zda%?#eq`dhkqFeKdY?&tBr(irZ;xl6u|2*Y(_x54RKoS|sIiR=?_GrUw-dWdosA<1 ztRc{2*V=BPs1x-0dvjH?r-63cm|hKfvwOQkvYB1iD7XQn`|=qW%!uab6pbFDs<(@9 z0hiGX# znmMQgpp6rv+0ay2DSINo>)L~8UBAdN&^}8R$yQQ+Gt^6#V_y=SeI%ZFM-Q2{_?d%D>nDPjJ|$1OUHT6ZJT+0njtzB ziH!0vgK=MI4q%2t=Uc&?Qd{!6hB;2*O-L1bsa>z-y^Ey`Lzt{JOd|xxcF!ILd!^-qxRoWUinOdRU_?6 zL4*N>=4wTBWGOXSCc3w{A~*ix1g{7{{d6X{*uo^W`JUOn0|o&U*oXP1RjHLox!xR6 ztUnNPgbCSx$nbHJG>xQibVZ~+nX11TLq?9Yo$Y8P%6C|H9Je)vh=I{#+nm5P_On^DW^KfB>kxUuDmM%)$S8|9`ds zYTJG|S4g0z#vr$2818U@d-LWHhZ&sXPInB9#}oVo#BI6}I=>|r8~1!D{pkFk@fu(w 
z{QmB=mf!J@E~Vi%&z?b5hhKO1y81C1(y#43$_2U?R%-BJtSmfj<5a>-ucxk$j%kS< zcL=S$;nTBQ+~{7=P78OBx#)&VE1q>iHWgXf5~TQDYn)$KgZ^nqucT9e0|@kdg(JWs z%e)ZlEI zLWo+E&=9Yh?(-Mp7z=%Pp_*W37WPf`QC>)Oa`a&!qcE9k23&gen89>^2oAXL98)iG zdc`ml*n8jCTvN3@R4jE5V;u}(BG*|oSQJ<^brvZ1;i`H)G5_wIor|jfu0P)6)IuS@ z$aQCYr)-(sAYHPxo-rk(YC_$~7*C^vKUH>b`;_OjS>DQ!ue^MAU*EnK-;npN_CbCf z0?RJMotl40K=@?R#;XO6@-m^IEL5^6<9r&~E(FG%*u8~{Bt(V+tL-?3E%NR5XTkmh z#NdAbrSz5O+4!kz5>-l2bnp%>H)dMrf=))iDdbv$?`}?L;!RvGU(rtOE{~!Qq#eby zKLZ?&+t8J9|DGMe64&qFvHC{e{`4ZI?5hQVgf94ARs84Q>7H6EK%Z)(I7Tc+)S=sP znPl4zlKMSv$)k`FAVjBqDKdOHU@K^6^b+1h2O_`gqtA4e}h+} zm~zaP{}rF};A`5Aqg-^-v?PZ#uFq{cX6DTa2+IUFP5f}bxH1zhqVmB4Gt+Suo5vbg z5ofV|=%xY)R8i#VgFGuWZePi8N)~_Llmlnb;EEi?o%TySyiwlKexo#Xv-Y?#zp*?C z2@PB8C~gtYH;Z~1E5dkx4qsA`&XfNbCoXaYz}|q@6fwajT#uhH{>xagc}hF>j8$T} zKa2GXAD(N7Ul+2}WPj*(Ea8kjO>a4h2VDBPVXs#ce|W4&ieU-(kge#f8jA3GH#r(H z)w;+@sXf5)9yw?V#jX|@MK8{eXD?Baf?rVDz0oWF7P&CD%Qkn{l}z#mb6k9}g?`RE zM_zF}1zGNuD=B;7{5q(3fg7zedaASLG{j+|3+=B3_wxquOm95?X;9sdist93M%E6I z(3D_9W*9oc_*UDNrk|w#TO;owkId@Ko}0Vg$AjR>MCHlM1ci4S?IjL1xKxU0vcceh zMQknN>8pHkI@)}0&!;5*fH=rp-hrbx%mdUWVNJ~wcXOZ`P2;7nBg+&*-`3)!{IS7C zPqg1sA9oxRIs_JMqmj^VV2zHTd#+L)BLBlYPZ1Hh5sL$9V`y~xhvYUc2S2zr>-qy) zWzkmt^h@^FF{(=l4e5#i(^M711p^h*qIv6atM*9H z@ds);KV#y`bhQHf_o#i@?;A0D!@KNhb`axjzP|7pp4OH1sMz|{xqVHMzuU>t#fPjQ z{>~-pIgp}Spf^0Az^BSbQ*7#U{98+M57SZPcn;i;;+#?!HS$8B{u!zP3-?NRs!$I$^yIb#m@J}WqcZ6C+~X$Ygu;Gdb~{UjmfHwEt=T? z#m5B;u1MLQvzHfj0`H^Wf;I-CGT=APWF9MLKzNTfbcF<32ppkp+$&ZBfj_7NG$@wV zFyStHwaBorwI+ViXKG}@@-9T=w+X=@B~0EJEbV?D(JwOBjeiCn@on6yMAM1*)+|Kz z1O29Hz!vCdaj1cR(mH+B0O~jO>XFEFdmw%zrSs0C3?V12j|4A+daPw5*kP_Hexpw? z?D5X22q^%$4ER;PSdeTZn$7YqUY1l~<~sd-!I=SCtq=Ez$V7}6kvpd+Q5%V$VrcX8 zzA9_G7^`J;^nxYo%I?f@lpp39Rt7^}hh@s(Hp7e%iwo&2Y>g4M7+n=>p28#4phA`& z1Irg3%Jtu0LxEZ$7r(o*e^U|i^|9jAC{odiGqUZ}?kg`xq#RWIxU_iXTpu@v5>fh2 zEfPK0^JewzwPp`XR&q}*d3EU(_K)y8AoPLa&hG!uQ5MO$6pP~p0a;z;xj%NY^Mj>+ zNF_+Ec7#C*(7vI*ufNw0`ZyFt{9b8xhzLXK(Z>H~u0_xG@b?Z0nhMEytk~hi>cKYZ zN9KSynG>H1?17vYsl;(wgf?^oH*lZxC?9ZS8NKp(SK334;^|U>m=Nwt!*`>nFO&(5&V@19c5{y>|C4-;j*>HON}e2F8A!jb1U z(M5_ekO+K1W>0U zI&Ke2Pl1*45=7mbHQnQiJbv3MO;9mh5s#(dRTuBP&voCt!4`jHFVllMG|+BCZ;}ls z2o{wspC35@)+!FT8*S}}sZT15&m>_s?k0YjbxQ%&4d?5_sCJLM)Dr6D;izNo;#F^}IH=F5oNDS9YW>a~ zVNy(58*v18{nF+h#Do5mE-)W>CK{Af2?I$G>GFoQW3kl+S2on!G|2X}@zQFz+k!d? zBZJ5C>^l77CI{5`mvu3*QWRfJc)$1h?tBNq^lcI@rDBeb*PrZPiRS?G(DSS|QqJ9- zaEW?ODYBdX(<#ttun@Wh97y8OqsYr7$&DxLD(0XzI zB~~KX*&YO%^C2rT59ec=HvkHT25apHd}p4C+X?3)0Otse)yJ^#lB(ju);zraY)#^F5Sw@WprO|JY$o78 z5QVNy?fyiEE=({oCgB>{*{&T!>H1NXEr9vj&7Rg!@uwi;$2*$fcfrm4+bV zzO?a(EDnh4e87Cnx$oaTUi$fle5v*rU|0s67kru$!3#yB0i3m5?pNQu^B?M}PHObq zj9>}XgQ}NT2$D5bG-Av6yqIDketiCyw)zqm_6%dg)>@d;z0U`M`&9W0r8mvMHn7_S z{A7LI&#QZ(M>FHojNOITOE^f9tBmcRr*{X>^G(~HsWiyrI!4wHA^2&fRec-hWmh2Ev~_m@XH+9uA7!3sn^w|j>%W0K7}#- zrC#Vq2T${*H#>yc{jc@DrHDvFL*8Dsiv(wxwpb2T1)H4T@cufySR{WMwI;tom4)uN z_5=OCI`OO3lxK+rc?cH(bbQl@-b`xSg8M3m|T^YI*Ap`#6!zhU)h z2VygzOSbmS1QE|ggA!W${`u^QKj+U5#$?~${+X&=UduIQ*sjCJc=5tXN=a5q$1<5S z&mi4xc1kXmZf942?oXm}kAP4@ZvG9}-xa%V)^C8hvO$W}fRr4`D)3kSM{1lJ)6UWC zv~;rgob1o<)b6&I;Ia@0DAE%t+Q5~JF5xAEoyYcp!xhLPOeyMCel|3+7$EBBLjzN) z_tq>91N9lglinH3D+KQTgF`EYWk955SwtDRK(Xkm1_PdUu2`VCD={C^j1Iu>t_n)v z75viJonF=8);(({EU;0++x24~LBOpnHNeT+(be3ay`(G!U^a#h)=}@?`JSA=bol~e>;+B@Bkuy(@&Y~z?oXjD!C<%bw0f(kd^Tc6K z)8i5m<@yAARv|*howpC%-Qfg`-P_>M!ds>6NWHtkp@#jbB2j}PyCNo0B!ZCe^>g3f zPMfb835GrR-9wd|weNnXLl!zUTx?Da

[binary image data omitted]

literal 0
HcmV?d00001

diff --git a/doc/imgs/torch_profiler_trace.png b/doc/imgs/torch_profiler_trace.png
new file mode 100644
index 0000000000000000000000000000000000000000..76129aedce0aaa18e7390a130456baddfd7cc4c1
GIT binary patch
literal 293635

[binary image data omitted: doc/imgs/torch_profiler_trace.png]
zT|h}%;O9aMD+}jFh|`9jr0^er=WnKoOPrF_ezgcaN``%^k(u$oGW>`7-QBVt2$D+w z6C?>AfFLQS$1HjV6DDC*l{IssHM8WHUSzkKZr1zZuqrPdv_2yadNfL&zxFv0lgZV| za@H8l>dQCTQJM-=&3?jOR@1Pggr}H^%e$B4ZhTMUMY zrA)z=B<3e9Ns97ndV)_}$4(Uu?e?{hpKgOFBrWcZJM>=_i)HWXh>JdFkU^`NM&T4OL4>4@OS`NXM{ z=`{a~8OB73jV&I~+pz=uxBfm0sG~BI={I6z9>1x(~PN>A=5tS(4NdQBQ_8Pj=$DP3jZ~wRo=@IzC3$+A)_K zZp(50Gs)n?;rXkY8JPv0c=+xPQU1Lmxwlof60SD8}VLPllj z#5cnyv+L-?4i;mzK9l_>NX`LxP=N5soIn2Sos*MMU@nsi=O_JTcI^|TV^y|272Il- zmyYmVE+!bnno!%ITiTN;#(u6n$H->TAe2h^J52e|C8yhE0q~RlPyRbt+DSFqLo`MD z3ou+9jR-eq?-Ev0+^KUkAD&s76S5_sF5rGue$--7vN>9*C#0GA>b(FHof-oJI9Cx$ z{M9%i_WTjiEt&KE<$J;kdFBR@T_(2mR~~$Nm+pLdFP{@qaB^YoIn!{_0YXbPR){pH zts0;ja%+CVk@m=$CMK%unR?69^aAOknWVx{gz`{_tZ$Ykqzn2s&|*Dj{;(jvH!qv+CY$4+iHhIMN zeI~%A11zT#lq6zq{4V+0(R4@DUq0;BOby209mDG2@8;;X*L5bCHJJBh5~~MO@aezm|vd4JGOg#5s|cOR}U94?hj9l~BXncJpN2o+*f{LZ0k* z({6WIi-is7B~2Aa%)4sCW~M^RL4o3_#bFx40kSbFp8@$`siDRfO z!v;_%EsPM8GGU@rqLAxY07$x=z4o2+HhpcszFJi+GKTx$tLl!fr|Cx#;F{Lsdqc6R ziVpZ{LjFQkyM(hDB&6erc`9yU^PjFhgc)WIK^&#!EHoT2JW zI^RgXxhuHMMCXdCx~XVK2zy!MlxQO)=G4wB-Cg?xLWbgkT(l;jSZw5Vm1EQ?;)h@H z75|Awara;nsQkfGqW3Exj^!{9dGK_?GETazxT409zAx3}H={6y?L{qv#X?+Wz z?#w;ed45tm&2w|s`@4!VnqJF3cZ@er4#2CFaAjnw2ReEU#(`~w) zZ6by->mo?%9JJh@FsO8*LBRpHgdzqPeqKAl3`b_dir-c zsy7XZDX)G`Z1%6*;SM_T`SqqJ9+8lZw*n8Y{X-BM_r~U-xCpnhm5QsL4azqE?QnOR z^?j-3=Q+Aq`Roc{w;&E5lE6VArVQ2xfS*jrYxFE^dm?N#4}Kut{$>8faVIVQSvQdG9i~dQnF@gwD%;SZ|!O83%$0_3z&8| z0f0gO@_s-52bpvL`jJq697ba+t%*HmFN9N zFQ0}-?^q_RNWL0W-1&I!gKrYKBCc{PH?FZI3eQs2q>;fTvonfQO^0RHi0O>2U9A@_W9>SXOyuK%4UaFrbH zr*QtyLVMRv&d3RSUM?uevFG?6#Dtz7ky#8fC+*9^9yt+;9D9sl8i-@tqZH$l#xBB!)YjsK zql5b-|0QE!kwW@!ezC3RqH4@m!U*>R^hWA8PdW?~a4P<2%cq3A-lUh}>Gjn3;m!?JQ zx8!dig!?yh*_apgxo^`h=NTA6zp6Yh{B_J9YEl{qxyj;%w{MDt)%j}Oyr$2$=+Cs) z)=izOI=iUYm$Ev5`^65?X_N1-dWqex`RVwX-*W?AyuPIKX~pHXs5c;JvS1BWE;x^! zks&vDm0%5*`VtWXEdBM2VTOd1g@w1wb6$v@^Rb-AJmp0`gt^~k4il4l^)GIBO>5Jg zBB@PYEzN?zqU>Iwalb~42=AJEo&ohi5>tnM1Jzr*NU7q;mi#W`+i;>C2-8TBH4UcG%JW`lF%qFSJ}< zAVU93oIIvHxip{rFKuaj%whJZ{0n6^1Ni0JWAFuN83M-izvNWGa!oYUz zo+<#0f1cVyfB|g-XwYldkN>t8*N=-Em%fw#4>%ehag5P+9%Hn}AZk=5|M55EIQCup zcOaw%@J{Z|#BI85=f?|UKGf7N{=C9igP{Lzj{l2?{(s}GH{>BpGs$qSE|qodW8$}R z=kKTW5c{V5im{7n-c!*x)`14rRS(wxe?Q_F>z)KG`uVh$Qpp>ioK zciM7;7fU}}bejC*qsFOaz6(()dv&jJVe^SaJIja=nl^E;G@0ZD*Gh)AhgKCjvldF? 
zF7(&YTze0r0~_=nI)KliHhG{{Gk6gf3pJSoT&O zrH_?c9RscRgRWAOzqEf81TemO`u^LCRR=5XwTB6N=J^1py4&z^bqAZ=%p`ND@>6@% z1Y|#2wyD?#5X+`1)iVSC`C@*nvDk9vqhWLA0-V*$c|2vBC!Y%{{6_bkq&r}Z*m4{A zycUi0TiLuhw`us6IU8S455Vv@J?8tYUS3Rry4!HXU3n>78lH9oQwI&^CSQpqB z1gl?}yJ_7y-sH3ZlaX*f<)EgO1O?5g*D??6$j{m-xf}qWTBi!#(@yh@1s*Rchj0i+PfctXnmW9X(wCAOTRmh2Mw^YJQH!tXnE)zY?~yr|VkcBlkqmUnAjR z|9+ThNqW8#t|5^e;V{1D5v(#Q>UvxLr@Xhva)r-MKOeASjQ#Y10yDv*CON?rPhjvS zv3~vj_2&bj<$+%7833&84jQK&`oSO44!giFczllLvsNO+WR6t-T`hUcMCmw^qa=!( zyF~F|s~{%m%hA|vWezamYVA?uyVMc0onxS@uls)(S<6QIBfo*&RW$c1G z)VlQYr_*hvRzLmMvu=9r{qUu1dgzKCoy$4x3{U%cwBJL$mr{30I}i#1<)2eU2427N z+_U+cT9h2iIEV;4G%v1w8ed$~JUh1&$W<4lzl~Adiv4G-Oe+wKhGZjuDra@c>#=ym zC88y7+`InVfMYY)xOUeM&TB(>|Uk1}4L`f(K`?;+ymB zJJ#-=Ac*sgqv*g4lv6p!!FvmyK<&q0f4R1dEHuqZNdT5srjz%jOe=3{f4nUQP9aub z+Em+LZMI64j62wfv~9MnFi5RG@~5*uN3ZW-zDL&vbr#dehD{O^JdXM^v7Jn>PUL>@ zhc4AO6ZfK`CQ&8nw$}kW!tt(Y{m-reoB|Hsru1E{$s$xucmP?c?yY;j>+W>XxQ}gQ zAd31tUMBpkVA3&T8om2;<`4LTK!?7y*k|f&plTDNP}k{Ho6&RYMKh~ue7uf`X`;0S z(ENF49T9xbt2=@k89;`qu8bz$x;_!CV;hA6f4Wc&gGhmV+BEQok1|;zR58l8Hq38| z!Z8PRT@WZbg!byhlAJ+|{kQ#NNY!fH)c_;-OA+8RVtc+eOD~T~3iaifN^Ic~0;9S=Zj)bn3J%%@YB@XFCf!fppxVCBN@D@umy$ zqsy-r%2a6okjOFzpV^ktPc_@Vz8nfobc{Q-AjQmOl1o)|t6JRnf3}%~)0cJ-`Z2(U z9Wc2JuHam>r(<3`JR246$2T=(+~A<^|Ln#OL-sqPzDJ-fzN3Oiq+@{hjs#yge&n$U z$2nhBpCAA*6b)BEL`|>HvLoEuFEp?FxY$+R9!*13`qV@lhL7oXvN;tB@EjyO^=F>> z9sY6u25mLyX|>(@n=w9(plPg$=reqM*1vyuu zov-q^=WR&V@eF)}Q7PEhgkN@Rv-Zy_@DN~6<4&N$c~W7Unm99cn@iIbNPIlrHzBnc zO>4XTJ)fwQooR2`>8f#fx;@!OBIVP7t%3>FGes#TZ76UeDHD?M zUSp|>SI3JQLzDnG(d^C>Wg&%mtm~h_+3nWGQY6=$6nMI?9}g9RxMV6wVTxZd880?5 zW@1&ax?_-c(pDwNI~IH>DF7|Pau+x80uqCeuU51t8Rw~=SW({EEbNdsEoK|?Rzau6 z&1%$eq=oon_}1Uwe3mdSt4|$~;XIo3!s3bFhs3-LY3TZ3xR9~QZQ^l*kS-J;R>KZm zVltF$EmOUuvVZ61P-0HLXzrs8B%I3+?B{9fZxN~iXYWpC zY(?N?n2V4A2W1hGc@Q^3gP$)-xGl-BNm$wuUpg^=c5yTDXjE>wmktxfCJ#KSk<2lB z{iil1%9*#S*lkw4#ine?XP?PO8TwTldG#sgKmvCFVeT_q>+7QJEHVE9q33Sju5W#P zKvVl6#qQ+ zbQBlR9#UDxS^Rh|PPJ5XY=RFLUH+I4apHSEC{M`Sr)#IgYgejfzM*GwzP2bsWj{1Q z27IncmDGyr@Jf}FAAbI}OFga3tb9;537oj;8h5R*I|J-=#l2;uF#K{J-vc2fZmCnp zIcy);r8caq60%<}6DSV~G{C9w{dChRUu|jbBC9I8VfAjUHt2C9F^%?S1z7zE<;QBZ zMyAHWLH*MvMY?8{qS!AfF`{J1XlILVkycmF1~Pc zxi~5bY< zZk;PyqJfT@2E0(}QINakfdO}30e#(FhpubjMI~|;f05@A@yyYp+}=#78Le65d;>ku z$TDoGxY23NI~%>lm*>p&su`#@@|FJAJ|@pZwXa~>;`}3AtxPS|dCPSbJr$DL#YW;@ zNp^oJ%o??|{E@rq1mqvc(>mdLnU$i++0y#8_U-;NT>LucZuQk~_ZaKRkS`#)%j#@i z|D@)phPjjfv~qT2e8-Amr58_3XM#mfXTSAb0@{sn%tcr_r8|YTu|947Q0Hw3 zcYQ684QxVQa;=4bDW+CLW{y|L)G-}7Xi2JEz}ioWF6p8WDaF7r1Kz=_wiiK)rN}WK zC#mC7Epz#$p7l01-*wHdAx7SHZDzY5v-z5rB>O!ZWNVN>a0?17jy1T+T*d+$8jxm23v#>QgH$zf8(6du}{kW7d^<*jD`}Ouv zK(l>-=#N^8H2>Mn^~6Z_g0xS_PYQ2`{10eH(M~Uv<8AepihYzbVrK>1#)*|dzuoKK znn>xS7Y<0VQ+~vU_J(t1evMcsl4DRnteT9{Eae>E9}App(61r3$XH3&IcV@h)@NLR z>@5Pn?lsN#gTXV<4<~Z3J~yu^xCY2Gr^Zj$f%hH+6(yW1V$)LVbSmVoa|R|-k<`{v z`%M=VO#LaLDN(U8XW6Y|zM)5JS3b;XRh?Y&AJRhIU?8yK%KUd?7Kjn-EA?`2dX-ZC`XDg* zw3iTJdB-+gLT` z8vQHpw;Kr4$7=$wD4LTM)=SG<`()QUd`>1e6^f(R&jjGH=&tM62>VOXR3IXtGlZ&j)XNo3bN2Cj%mv~f;<=sJ2c!?p-k~yqXTnF^Uc}L)R@v=% zinui+{F;p0l&gE=Qj%x3@2!lR_Ssa*-?8e~c|BqHSs#v!6m1@0`Dr9H(qcf1&rI#( zn|jr}hUXJ@UdZ0(+K-haQD@@6BcI1osQ%Z`J*Vgny<*PxHCs-oj96ndlGL{AOPz z`^ra?(^a)kJ{Mqv8j492-n~^PZ>ZnkFcpED7z_?*Zy`9N&W>jJC5cJdaQ0DFv^^f@!z);)&>A1mRcP#II3|W5- z02Gu^Zmx@5)7|o2EgE8{#3pZernm(V&}M^^`PV=Sgd4&7SE(#J(j{N-`;7fmLo^NM zU{3LXU^KQ>rEhRdMx(uC%08e$HXZAxlM8CQfrAI#OzT`pr##z$_gEoAX3MGP)^9Y= zWf6Yg5iE_8+7J)`DPUo|F@(}RXT3C_!@p_5vI2J6Y4@rZ+MGC_eCvAfqu{~=m#)== z1awaP7ptVlAI;jtRX2^`W5Me%F?@w_#;<~!GlkflbonKtr^)1;hIzT&#;@c;!~cz z?R?cW%$B3L+f!jVA&>wSdZ2?^lI0yF0@EK}_E}tcv&DJ|qm&caJ8#FI`4pnSBs 
zJnEKv_NVyD&%a=}ax{y+XK6;981)zVL%@@`Jx#utq|;A%+~}!!GOyB@l=mp{J`Sa;KY3 z?BOVVz;t42?0*5hhaBI3FkQUrWS$l$#oHG`T`y*-{TKmSyvKr)AyMazg{BrtG5I9! zIYH^bjkFX+|ATj5>KiS8)%!2y$#Q=(x_!68pOq--b_m)cvAoo7$?uWSA;UbArsVpF zm?ml)Ags<+`QRQRuDz5iFX@7OW~wQ6>l8Ttzuo3wEx$5*8pa+KS@I6KbG??ApqZA) z*r(;{N}v4cqSN-mDgd>z==J)zhK>$iX|CgbH6kV5^hyP@~3sz0Bu>HQ_h+&=3;27UsUrAROy-(yxuTQjJz>=BOQ)o-auy*|hik zIdiDrz6`K~GEhitg9`bHLBTt{fr--H&pb{ME7_Doh)iDAjiPaAJtnpo8V}%&$F^`=(m! zTr}w@ZYVcz-f;L>PWa!VI8Oj#4AEGm{G90H0IUs(r#P{fpa~7qi$&WZ`rf;M(x&;C z%#GZi7`%+heE69kEvS<*#((9E7F-8m!J)}+46=jbUqzjgdp-4KZBuTq*8hE@Z2gH)7QLJAG5=i}Lu?cpagR5$GzZUV>ueMC1ipWp(#+B$|ma-SgY4;lE znHLk16CK+AO*{gN*dNwD51RI2qGWJb*WyW2>mCac>9yR|?bsX7N7FDf% zWlbX@=wM!P9i=)k^+=akahZ+K8yc_C7vOxfpCN>TOM`3ZsVXHb#c<`hs>iQWc@>6{KJTAF_qVZ zpQrtIT5}J!8<>;&gKZMGI(=D2)x_F@Nt%u2xzO}P-YSA{qX9m$-e2!6s>1Iqj+W8! znVTYAQH#ia$BoQ#nx8|NwJwjOn>pSq51~YwzM~co6^gPJ`E??;?r_nNjo;Ivw`SOvF+H8143^+!!~ypoijWU~8S)aB;3nSLIZt`K;h zD(#?15y!C_v*!s44gMPC{q~<8d}Ig&+GsM>IgLU#J&~%nkq}U?JI^P2=G&i!HmGV) zd&di-@zCnUX8TE;{~z|{e7ETt?jj@p4A+4yotud8rO$40L2D10{b1AIJHeP)LDcnEW2|t zjHpXK!@aDw1PL>5_7?ZPmNqB*0X=Si!i!OCQn^B8(ABz&Ab< z2$vFhvE|}F`EfPU?3NFVtV4T>fyqmCVZDy(FA~+|-?8bStar6^!nQX{_TgSMkFq{b z!{G~`uX}&~7tQ+Li#y&;oBdR_2Z36?U5@dfxt#Xd`#Qj*d^Gg{B@X%C>K{-D4lg`f z^_$$lQr<}F^kaaY)8F-(kW}PCJ-?du!S9to<>OZ8@6cWs2A_^O2em=DeoE|(F-Qgs zE~O7ICMzyoZvOPrZMTmIuvk*)%J3(n!K*!+t=}T$9jjl0uNb@2BpdwtW}v zqqH9U^G}O%HqHIg;!4-m$))onZo6#|vzvSL*;-3Jy7gX=5k`7Vf{nrUL+!m9FESJ_ zgxE8#DZwdM6jcM(f9shS$B$Y}=73W+Ao#tRNl}u=8{}^u@6Fhi!>j)5dwWp`CGJ?{ zzU-r>A1pM<`*?;c0=(MAD6pi}*_U4+1zHMcv!g}qv1*~$r z;LrQ5&jv)0Qq zaL&o_Q*gw!OMha{B3e$I;dvklvS2E=;=={L{xN5P;~}#j;i-hRyffP3XN!nyjkcyB zQb?iFMjgU13)DIS`uBCA9;eoi> zZk-1lbzq8!oXASlkw|t+GGO=`ZSxtBiKQ(zJ5iF8-9Z-)#dO-H`h2|p@b4(@wEj{s zO^{)^9`Bg8@`5m5ry zK%`em5is;~oH!mASeC1ljRxzrL`|Sbvm}-xM3$=+yh!18I9T3y;oB?EbwgagXmL^X z8ejK^MEm7`qpJnO*43NrXMUa>4`X_ZY}%8Iiqd@?S?ChoYg+{YRcz&>zLi=tQr{!* zm^UTlo!-pX&YarGY#E)n*AF>u50pAnQ>U@JXGa7W+QOo+MDR1hP;bI64% zC#mcjOm8tQ2Z|jYh%I4){$?t<`p;Y9hzu_g{VOR&8(Y62hh?ZBlg^6AuKv6Y0*1pD zF>@?3(-Re6AOzwg$^>Y<=NGV$$^#7BS5p2Cpw>((Z{AXa%ngd0o)a)Hjl>N51ni&c zG(pu2Pn4dg4h)Cwo`}nrphl6Jw%oaq#W8OY0qy>OE(O1JstWDBo!kbx(xrbK42%kz z<&>3^4d1Y@d5uL7wApX$jWvpD2$kORn+>m!9lrs~aC*g6%>U41$8DItlh8c9GvXzL zcl-Ecwg|30vU#fPNtM!0JxBgWPv)8r4e|i3S=Rm^ zE&$*ee{=BOnEpKB4>+ynTy_LyNE+fGFirC>7Q|?bg_gSwM^Zs|+J*zHTvi!8eiMVU zFeH`Xcd*4v7~POw1ya94VxF+V)M&0$u&{&TXBh3hMcD8^W*hAabbO~$W^tkt1Z{k< zn91(sfY+o*qz1R7rUVS%vS6yVS94IT?#T+YvGB;N>;G3`o##GL#Cjbu zIaNJ#l)z1_-jC*<3T5$WeK^)$WOwDCNVEj+zGNxb%_NrHP~A7@lT$JqXyY@t@d$c4 z2eFw`P$r;SoBl?a*N%KKP_t}TyYEaNu_@Itb2&GlAT|NouD>_i7n=0jdMVENdh)Ke zG03Tj*)g5)`U=VL{1p7u%zQf7c(+m3Iwovhi)$|@Z@tMNKr@QAL@47{hLkH^)rBr~ zOV5Y{vxs{e&OVZGXbmfE-hE{1M)u(qK|X7* z0D%t(q7~45nT!choN?T9hv^=CF*k$2=r1L9PWTMkF3e$FfXx3l;7HEGZkMM_8F$v~>~XZ&?;3`e!!n<_Vs>Zq-dsclZ6rMQBPW3y8e z!r#*Nj?8+U8w+ThUupbfL;QtE`S{9ZcvwFk&nk>CwEl+`J0*Cxa87lV6LxAmSh>dA zc5KA@=77Y>lU}D(VbZzmvTlj3t^FKsJb*S~w^Ik`(qu7t2~Q>%!7`4Jhb0I_&kywg z&q|xcll2(z1p0nzjlSB><5RF}@A^gf?Q#+%MgBF=zHubNk zFz~$i)fLR)eVPBa``U=Q*jzQ(d-<=5WTw8V%YrEQq(xTH19^4;T)8%P$O3CSyyx$V zJD>fkt{;815dA0rZ?NA@I1lnpd+knTn zz~m9MbFmqa8F4&x1(Zo*g-yef{{S34_x{%=!x_XQbEwofBzHNO{k<5_Mu?F_pra;v>~UwZr%Ber5d?WDQT^S*z*%s#JyL9c*y)!XcXhE zSBuOHoYep>R&@NnIUpN|lZP<#LsnH~j1Qs%^*&TXs;-ZeuKDxecg@%7lc{K{VehD6 zs)zh{R$bVE-IpuZPH^VbZLJvNM%y?4Uf*7$AYcc=%q@^kVWGNfAfV zI%=(oeEU3<{&L^i|8w>ZVAqyH+$+!1S8KRUhCZB)N6qR_Q03$TPASSN@R-*Z0lJG> z#a4eSdpls?pughd1^%H^-UI8?HDvnYJO4E)g9nfYBE*)HKAK455-&(+xt?;1I8cw%Y;uPDtzRP72Sl{L3h>snv zqayEC=1~U;;n)J)!1^kwrOe02#cHP;L;AY)pLGlImJ+*8;RWb-Nr(4HdON!Xjrxh$ 
zhw=Gb^^%2qm#oKWKBE0w^mml>a(1tS^X4o7@?3q_hGly7CMI{I+o5}bkiJUa6zQ%w zu+XMoqi2ySb1GxWYSzcn3dajyn0)a2qt|gVoMay|ye6&B^{uY+a?W37TAG2-KS9@b zfY*VwnEk;1VVfC12Wb6;q#2+42TAKpPpMCv`XKk&$imPEL3U3U4R8v22Cqj8B|rwH z=58Buj}cj}vG$J|&Jt@8NhC4bXxCl%E4fu@vkp)Iz2=5Hb@ zQkPope@XEDaAcbcnIk$gb}}`5rmINU-cqt^K^+IATxSSm=Lcm9@dN9CT)(bS`MpCo zZ~gmI^5ry%+$`aEE_C0QO85OHz_& zV{N>JNyok=0|;Z1gIXfMF9&Q9>VdhU^yyxX9(8TmYJlT(ktP-fJI(f1H1rVlIW1`q zwetbunEZ)f-+YSzgz*3TVuoU}GU^f{nVsJ6MVcYho}3LH$i2hUxt3X?8JXfRW+eAP z%blpxnO)}R&7)YNeJQWX1^PMo*)IWwi~8s@hxn}rcCYx*6+fY%OeCpuKkOTV3)2Qa zv2hyU`#k?8&Wh(#=jFf$(UYbyP{HS6d6!B|ReMGkTWIwIwh}kYFEbm$1`vabGq%Nt zy1uUOPorBNF?&}BZdXWdK*16Q#?co3ahcXn9JH0J*L!#2%Yi~cm!57IgO;<3OltCl zPPYzPJVoJ)FWnJ-6kF*0oVYxZ>di@7$pEvX=bWGYot(^!&G|&&AlFsJQ!Ge;@`04L zt?Qg#babJP`MqiXL)5c0!g=*JHGoZD`z|osI7XC_#SuH<-Mn)1h2F^&7}B6*rwjSj zd{Z@T+w_Ym{P}zhQGRzlv9tDx0^tVc(;y@xylKD|Z_+UDcBTn#ja`al3-ZC~whv?YwwM5Ce!9D|@P5>`9s*hdpd~{s~PG4VSH~d`7NzH z{Xz$<_0kK4$KR`4c8=Z!kW=sNmnl;*F!C7d_pcNSY&}cB-C8S&$_&1Gc&3>+Cay_1rPdPdloraDJL| z;7s*sNwdv@wM&(A`7pCCZj5J!v~^%)-h0d=+19H=gY?HCHu`*JbKy6(-vWER@QR`Q z&@;_wZp2*VnPKv7?iAzcNDJG0oE^thEsm+&Jn7eRWN~1^(9Or=fu-|cs2885`+kYT zNT~AesRs+X`enTwSL_cc!+{6ekrILa;$R75$O3-?F`xTsAApHlSIArnDyAk;$rg(t zV->fFC;HQYzy*9y^ELK+%m~;paGtUQ$chbz zkOUU9B~;(>eQIb0``JqR;~a5W6$xb<(U$T zKW2*$aYvB0 z+>umK$3ie*78BDNCVn3ZRi0 z5bt~2tn>able>~cU-50SM)7iEegMs{S8bF`c~9kCiRal|9J_D4<$V=VbmON>tMU3g zCv||S*Z!G~k9u;ytqe~nhWd7#T;gin00i$qG6 zDdFWHOYWm>^P#UxfE~!74u2=Vic#=4TLIdL80TRe7^<-+D0A;*t(O;8zt~ScYTlD28a2v!Q!NTSBr-!603-?ZvI`7ISj(7uk45j)4dqdh zE^AWyTLxK2LK5rJuXxC`IqmD8NCw%r65u$q)cG6<=K)s*W1U-Fl?VERrM@Ryn;WU$3m9Y>nmR553jD zlU`l;&M*&&Y-Kw#@NVQN2^&esnIF?aZpq&t9(Y63y7Jkg zwt7!Yy#WHirzpS`tfV)pz%eMZsXUKlo>l33G_6M>rI+b{R36T3TK*2_L*Gw1J&k4! zd$JYdV-Kq64@k7=_*NsoTl)xqPv|w!R-JmURb1+>o=Z6PXIl$&3jJ`zXq3L zqcwM5dKN;R3k$Zny4nm2b)I}#(SnwGZDS@eWx7=PdTS}{w>xkW3(9^n+m&2IK(jRu zpU zEvR;ENh{b@Q47n1zlqk&4#8Oaoz9Jw8fPiea3Wn0Vkey_FVa@(kfFZ7t^-U<{WOAW zg6yTX!+y%j`+p{QuWE*)@Mp-pwfl*b-_S2(jGmAp=k+sO%WDF1%X`5VEA4arsA#07 zS2#o{oSkO;ECmPxL+Whf%I%b(D)SX}(J}+n z1|zp6e;m&+((I}O;ciq*+;d`YGMcIz_HwCLSU8gIvuBnQN7yq$@lh6-OC9;yivez@@eIowB}PP3r~bm) z@5f61ORo0@=SD!1&zc(M*XmC?RYA0x`(_PpyR_Hc8fnD+@1(lM-DhvGePTIq&u%Ac zfOe*ONt95u-2?zXg;t==4anE$s+b}8WJ=2U9ZzS&E6w!Tu3Cyebs7NBpc0mpwF#Qk zn1(x;KkyD9<8U;cc% z6=>b|pv0#_NO@Of?hp;S`K~;Dpuzb)=~H{bwu=f31SwSILdcA@IZwSJb6MvcHR}wn z3_3Mct4z4gAr3ldPF40?+I)Zvh%cWx+zh|sUZ`8k!gf@84V00>w_k4zTc|`8OdKpC z{4$rJzeao-9J^ky;|XK#sVO9MmO8DEzia2h-7$XQ)xAf%BIFd8U%r}v58;xupV&UB z^pCi>-pWNp@$$i(mE78AkKfL<==-h7AkJtH@u9_s{U<3OIA`Nw805NTwJhNb?RO$E za`m@Eq46D8hwt9~Tkb9PHyU?!_o9f*)ruvux6^2sZr9MaO58c7_C}E87505MI@WXG z5#Z`-o+j_nwyV28I-4`^!YJ6tZt^_9M_}jyf2H#%QtOqj590da*|O^!j50Sgz%n}Bc)?frAMCA0OsO4U>ho6yQ`GVAJVT%IFh9IyuQz!w@1%`?_mw=*5z+6K{095K zuA4;$T-?e#8W@GDx#nT$GFC_lW*G`l>Ha`gIWb&S)z3D0ZwQzhy}8ByHB;i_#RUo0 zi7|`#%Zn(-V*3Yov!s6T*`6btbNYm;(TUXuqjQ&9^^W4%?%JDQVjgTWWxIz(HTlVX zcPPt3EeMr?I)Jd@B7B9#f;%NlS#mj_>7|A0Q^+8Ga0Wctd?nDXMQYGD6D`(^FKtxD zxW}nCv8w-CaSpjx9!WqdQz;c6`nQa#HZ^&-uF`1KhwsZHci z9%!jKKK6sADZK4VR;uc({Dz|0Je9oAsWdj$Pu>rraqb4h^FMiD$}Q_eoG5({9v^$6 zygo?l0;C}?y3`>(?#osKXl1lD1FDJDuUiLIjLMsW-kQT-9aU`nBtSOd`39cctwB}M z1hyNL**5}s!Pw`~sC5@u5&Q|)wqd;D6W_#esc{6p;IqQ9J3p6OJWrT1;xYP~J8!VH zTM7(w@Eh$+h^dOQUB?6VFT#{V^?2PF z=MKxFa=X2B<6734n=Wh}%8WYn>Y0>*1e#tROC5LY zXy~bHs#GO{s!$JdXApi`v;Li*ssQ=T^x~f=7lRA=!kdOqj0BDe?oDTQJCT;VV$>0~ z@m!a!v)?^N02Y7}|Ga6m*PvIb(usOo%7XH=VUYleg%7r-gJqBwSR^nLd&OSfCz1XJ zD=~6#=fQxP{6Rvb_Kp6FW*JX8J79Cl!f`B!zbp0ijV1$$ePL`_!19s`A?Ghx4wa|S6|vVa|I;U>yCD{*Ne}Bo!Rh53b-8=e1{NU^%|G2 zrTCFfY{SvtPChq^*BD5q-3rf8fL(?NdLlcpyp&7q$KApRh(FG443!m{SvU}^%y^EU 
zAPXdH6kkH@n!31_C84A@U~e}ul=9hzaE0AvoN>3mUa@}&o^d<;MR`zw8wl=}-~Z~KLHZpvCGn{mpevGpBd;5T* zU(Ms~vA(uu4C}7sox^ahx06d4m%G7`kmY`XWjNz_l@@PPKwdA9bwje!NVzU@7cSL* z_v=3CW$HuSn4h37=7gxejg_@cKd~;1Rd9XlF%iINTKV{tmPkOs)eIuvIMpf>VX(Ex ztP@v_!8tvM0sA1!ME$Fm|HlSEgH0>kAE!SYZpzib&IC}W{UWF5TIQcC%|8&*df2)f z5b?@@>|65IHF#&BZm2Q#o2m9nKveJCcexp>qO2Wgk_A z`QVjVoPh-~s2h1YYLR}J$!|KINAr3;Qa&4^jIUa8;yH?HS2&Kt29;w_9r7L5Q1_Ye zK~f}`J>eB^kO(RtUSB$nF(dwF0QDNeg2Y(m*e{T!TY}U&HgN~LOYlmdtA%RHOJ)%i z6IrvC`IXu5uaE|NgV~6j& zund9YfcbY<$O^B|b29gqGh4sedBUl9PK7ssDjTp-HHlr$@Oq|B(wI_*5K5QIV(^ui zVv%ptaFbPFixNa&iHE)#`D$#&rLo``=TP}GzIM+jAX`B2j|W{!3oNmd!Mw3b&T21s z@6~4Xy}qEKYyD|jtyRGClrQmn7JlWrEAeYkZw4~X+kK=z6__!TZDAZFg4d(^7rhg4 zsr5>ti%DiHjeQ(|2whQ^z#e?g39PoGBe2baE`>Q9B=N>Wx5fm2y1YVWd5=!ziMYag zDl4AKD|pa(9Um2YCGgQz@k-V0T_sm6z?8R?_FX*4sPCNa1hc>%=KoR|cpXQdR+s-Te)QR?&Q+K|oPs`}Uz z{@(=6K}7Oh*(Q>iz=yl|NIuM)l|^Ciww3$UTmWfg6c0GF zX-w?xDN6t}mOT_VH`&m?(&wcjx$^XQteLz|dbK~yt89pJTu}^y!|)pox*l)K2q<%) z-`#F$Dk)}O5ZeDkv^xK^5IDJ>f@S>3Nk%Gm$`jDR@my{rN1aB>^RTGC8|G#7CZp=7}bIYJ9BVqrAC)FL0^7$WiB$Esk$} zu3|4(1_9WGa^dn7TkJd1TIO$PkH>78F}}uDcoQHTn{Sp>ipkfjr#4#FSj4E05Q+35 zPsS)^zK}4Jpini4?N8_xMAWbn-lTr1a^=?Oub z5#xN68l z6Y=jVD)|%Cwxx$|gec^Pz>&lVWe!}-^ZD}CiM`5@^`8BY;Q15HKLX{bOYb$M5(3M> zA_b`E_JuFxV)(O@J@?U%D(XFrLRaK=zqSlL_Bm-;Vy2%UGv&`}6LAKAIgq^5bwYNX zN5SM}bxcT5N0`e^!dP{|)FXL3U<5Yxa}5xcD?!NK0Cz)7B21|-$_C4eZc+gbm)DRJ z#VvZ<=e-%Jnse^Ii0`~1z6vg<(Y(M+ZzP5p2NO^t+}oRO_7dmep@xUiSoL^X;&% zf&9idE*U%yPvk#Nea*|l#y;?X(|hJ|1;*(7bk(#i_S`s4byIa(R`&v7*2?qdd|1Gt00AAT(cc!or)=HL8rvu(1W;=W&HTe!jiI^MwbPmmE5Nv1(Fka zEJvE%fm!fmz_3-W*8U^$-TvsD++D6}X(08s(6=^XTj;n{e`A0P}D&av%QFGor-~-OK74rq9jb_RMa@w$C{D94(^D zI1wo)TR`lj$klA=syrYpnQpuJ8V|uCz^iFBVjE|3kiTj5Wy%I+CTzh&E~vhGZYldJ zD0nH(eeBzryCTP?ZBZGV5dt8w`0kMssrU^kT5KN4*fq4*LvE`gGyAx0kwj&qO1iVd z5DsCIe~^{(-%i?JoS(k)hBx7ZZ?j!`W?~mGZL->q^2P*W8iss}F=XhiUs3FtTlKnH zz%K0jxj2@YsVn0b1}cWG%~XIsAL2NGiBu^pi*L;^yZT1R_M-jX4$~S-J9UH<^SrBp z%RZLxPZpexsG(cw2elARNR=`3@hwa`9UIdZa^9S-ET^+q2O?; z*>VB+7kyncA(3czUMqqA84 zZ(0KBn7sVAXJ1{l{O`LIAhkC$1fq*o-!t3?nHgdHjwa){Q)HE=t_!a`KaJ=Vk53c$BGgNpG)U z0ft?Qu?90hnE|$CGlkzW{+M(9zuoKqi5(k|XY#ajxn7nyn4Gy=;KZ;Sp%DcJt_B>s zzym0Cl4pGu=K%?X-hU(z|A#7%XOA5&Ul}L+vne+-#hZuS0=T?QjA_a~i}DD%n}F`6 z|6XhUm74s2i!*`&C67vC&7G^zU!hdzTiLLE}P+v9wY|e1l zUswem)?(zpNc;nHPyR;IIS0gJh=fpIz>CHZ33lBc5bwff7Q$u^Vax^dV5Z zy?g^HZ?+5a;0Izc zxwtOEoe#i4Y=_oA;~0srkKlC+rOyUjwMn$+<{_lyKOu)0g zBUM5NewnRp)1u#7*(=9Pv$6yaI~%TUZDZIJzOyRk)Vdfr*YEXn%AG7GR=mAGy@gfY zdOp2*#F)+F=u9%p)sez9&r1B55yzWqd7%mgT5&1z69oD=01^lwI^5sv$sY=4n7#Q$ zC6rNK>K-PINhU+knF(4XtH%{D;JE`@lb!ml%N6^hr4z~e7B85!1XkzutEn&ktLNI? 
z7jd!rRB_dwLHsskXMr>kCC2}OTkhc3<&|omVg9-ZTl>JsHX^fq`;P9qarcl406DWo z#>frsWs#;4CdkP3|0%Kn7VW75X-6y--VbUEBK!eQNSoh2 z`-9hs*zx{Fcw+Zl^Uikh|DBbb5cgf|4Rh&*Cjbw$NH=PXJv> z51=cN#s_kVIMYNd-nUSS@k=}3GSZtb0B)uKPX!U6RI?h{XxeaSN_AFC;TF&S>a!oE zSl_}p6tE^S_VPyc*-&*RfEFRZY5XE9`1_S|H==#W$Dy7qTWHM?L2t>Fk@abyNO=U9 zBVb4&dJ1;XWFFgU%W)C6$%mAm1A&O z%4jb$jxIX7-@27GQmtBxMkH3jGn$yLXe;&dB8GYs@m-AR%_AQ4jayTvU6lT)&;06e z%(EUH5{NZtwgt=Cpo-SXh&~W!L1P}fm>7sfnJK5uNe4aQ{mbd$wDqJ{MDGXoR`Io# zOq5I_&i3dacBW!-QCJ+yu&L{PH*xR716!M-W~%4lnuMEJr=U}#*ZX83X3Max2Bov$ zNr*;Du*FNMF&$33=lrjsM)4^@M^RQ!_>62aPyhAUo4R4Fb7oSE#a7#hdUH8)Mo-wx z+!`5thY1avb2nTjCL~z@>h!R{GntiAW4d!PbXemrS14>Kym5#G2a7?`Ga_cgne(p* zLjv-Hes=T4GeJPdeTk!>@!8+ee{{^G)hcnwL4BHeZM|%DRFow8Q}Iigll7qXe$)6K zPdJR--tJ`c*mDET1oX@%kM)^5tb+=Eyan(w?1lZn2t#-h;I`^$Px|)sv;?q+CoyzN zr=1P$qKLm33;nN&5QspgDPXp-rt!9X{j-X71yD=?iSbR`KW!i|ta-<7L{^Lk=@{$a z5Gdfp+N-&{XDrPWphi%COO?A`2BHUR`S zz$+RT(f$K?`qq$P)_%iF!aRVyPJfkzZ|; z-0R6mZ;9|5280TI+L67SL19g|UKq3g?^oJXFGOh4jF}YSoUs?H*nV0=-7f1Il)GoE z!m-lG!2|1HPY|jP_csnooxeOawCweguxeTT>sgew+ztJc{91IV7Cv^Z`0Fk0ql063 z)>H0oC`sJ#LBq4)D&Jbf@A$T|y%S6J>6cEo823+fI6$LmZ^?=zRnp44F~V&2JCUi9 zc?F~u8EUdS*S~&S)J#cgHDG{$#)vqP?LcS@!FxtXkIhZSZ_PHOv!ffcH?0ZZ&Mzjp z??>`CfsyUij{7U{Z%f@alT}N)0haXEbU6P_B*OD zB=KogW5@mEKYOuvQ-iY@yyy*8w)I_{aWTaNG-S$Qt%p7l6vq42`%$J(i`6q+o2F9I z!Pm&NV~2n^6mOWbBX&ef ze?W!spTimMf5|o7Slsm*GHDyQ-PAk*9fuEWsc80le$EKD&)M}LwncVH4E@4dH&t@` zA2h?jrn)>N9UI5z_cd6w`~~WE#kBpQ_1eS1$~m8HG<_wKL|*8fCbri(bqqPs%Lv(P)oND{c@P=?^3lE8AK9CByGC(gE?mny=cE9&w z5oNyv4Y@tKTyke&Vqo&~i}1{&8ie$2+T_a2H^etejTJKv;;+woj>|^e(fc;eo zZ6dK=)o7|gguT7_=D5wmeYQrgnko#nXv*z9GusM#)mF~IIZqy?+@WJd3tb$vig!;G z4J_i}94a^QC(s4?bBRIVjj)5I95UCttS6TwO@jDZ%oY=MWp%LN)(>&Ft6G1`?hB5y zCfIkaZX!^Zxdr)eVtc1r-j<2o(JO|ns$7(}=Cth;4Bc;*s`?I2M6G)i7~*n!tC}tk zr?Sln*e0?+eKTxsO5bWQ#rD3VlV56}^AC3Fd;<8rrtppsnLe2!f@2~@@1*FlkzQB3 zmM>DGvkt*f1U1RodbmgC>Toj&ldBB@X&K>hd^D<2mn=<9tLmC~S%R9YjItY8$1RnG znf$DDVI*EL>G42Piy2#jM;1s&yo){i1Bo(OgmG4uFHX37S1pii{^bm=c7I}Jp_um| z)nYMjjVJf!jZp?=IH8=WDc~B%mL<^`en=+GFq|_WNC%Og+JXR>dX@Fj>`v#WOQGtL zPo>0zw9G8AP8<0W$WXwtB03GeJF~=v_J4vi{ycoKVvh1uj=50XVI-Od9h`e^@Ev~%uw73 zI5Y0`!Mv!EIUIYPK7GbIasOys5^o>e=a}=TqPb=kQTJk0o{|ha%!`um0XM#0{xr>!S7ZPrOFlLFh$$+%)%_!A$N=>1nl6 z%&Y@9Oz;Cd?k}JNcloy3^lFqhb+$OC_TFlEW6M7(W#X?Z}~IILq&~#8LJK z0WtJ|$gmYRmq?1T*FLts{e9cg2g1W>PTBaICx=KUl zn^OW1NY|cpIj47yWX7YKoqsFE(aJr(fuM<`z#-Lm&j8SUwRg`Kh@N&>2!cfPSa*+~ z!jmNBe-G=)0R7{_CMXkf&ta>kjjp)OcpN%xBhGL0E<+j_dj3^Zhi+M*OGuu* zN+72;?+600osRA>P9Dyk%MA8qJpGjExuA5`j#FAK{}kHe#l48d0Q`*K|&vfdF#fIA_$;A`VImChufm*0d{)0)qr)k4)X3UTe!}zQY6* z0OqRUgzarlL9GVqXUMKF6vbX>QEZ%iFPlN`&NGn|?>3q0pzu};-cDgK#0!o|#Vw`r zb@-H{2+Z0nuXu?%Z+;B5PLPWS7lU4+S>jmd&uwp>pmeC@D%S7OMSjGIJ{9BNe2XzV zLj6;ph~(krTPhaqga7#MsgFUx=`3CxV8J+uk``<3>qaP%a0VfdKDuch(iBj0dCVJW zT9fYe`b*QBVb|)k+iuZ3a-yP_aaVKHRN||owwjFw>9ZhuVQuWdv{WhKJp}#;(lWK= z*c9!UtPLlWT%8r;%9Gy;Rt`1+wWPi?1Z2OJ5+9h-#i7w zM)-|z>mLCTaS19YMnd}Qg8c$Vbw3Xe+Z)_)*ueL5CT_XD1VvYmCGXw3_O{ zXeHq>M%abw_dW>%jnxe^R@_Dgc9f??lE8Knq0fzc4v+maLm#kc-8jSP9#9Tvhqd~p zXav6Az3z0(wAMNw^C|Di8jM_#Hubx?tvN}_v-Q5?fQhTV&P%X6N09XmcD zvnyLe^TFq%x?k`&!WRXkS@rZcgmdWkizuQE&XM8l63O5j zv6CefTF#m7a;3}5%k-Bv_ASj~Yt+OMr_Bg0@A`G&pX@Kk2$$v+X5A5M&-R`7kq|^y z%3kILeZo+khvWY0MN%|=>7#A!=mABeT)kSQLdl#N_GW72kG9z^i|+E@jKi+q?xb1r z^6j$0=FLF4y7{n_Gpwgcfs6oINlI0pq+ODFx}Wd1kBtqt(Gk{9?4kTpkt z0ZX<6U^mUkk^=|+bYVcqU*C1R>y=;k4w8e1mFEs7Hp93|Ufg(2lauFumG3Ww=E;aF zJ`-YfFwtKzCY>GvKKnnqEn?#jq}7@4HODh8)Vj#hIFo1Ag@m=;KByUTYw zjq=ttR6?HGQmPh0#TYsKDb-QhG?>9S3(C5PUa2qo0p|dpGAp>yRi{DDx@96GR2kpPbB{tCgLRSK?y&GaemtS>2SiN#bzmn(hmzk0=Ixt6oBkssaF 
zQg@X7`bOhcpjS~^*^_t-@@fA_Nd*Tml7lk!^KZO0w)&=vlwM#L?6%00Yk8NyR~co& zFp6ekms*Hc;cvFU=9#RaPkBz~=U*|_EnRC_!1e1SZmBL*TuoUQy+97XrSZy=JJR_s;oKZjxz@bzF$%f%_Nc zlWaFrk|_eiN2cD3@1HQ(n_$r%GWk-`dn1(1hHsp%YUJ%N+9WA1Y+z$vag!9Yt`0P2 zY=4Le&bu5O9jqzH)M|PX?$z2R{-z6J_Pk?q)=yR920>^$j%@$lu?Ke6N-9(~Q|znB zxhL$Hfu8{n#d?x$y6$F78@$&-S>+cc{sAm@UOT9ja5A5BnDQsIC=s0u`IecqaS|*a zi}Rk=)iDn{8;G_z8khVF=@;F1y7l!YMs`jVVvSZq4^+4wI=&h-RpsR{9p9(LyS=`& z+Bui13uAn0Wc=2|nsL~Y)K-p?S&{g;7o4aaq%jl%HvHDeUjmsr%Ac&=Y>o=ycB(qQ zN2}Qiyg%F-JvlvhF*-SpjL84;Bw$ZqAupjBVTREq^$wlBj^8yq$3lFE`cELYutuG% zaSff^7`TCbx3!vRDI(I}1DuDoG7w}+t{Ccp1HEuX6FZY-Ho_@S2&(b=$J|!Q@7bPm za;?REFOl5L3ntz4QlWvl5f3;fmFDmq^AFb~91&&>Iby}{7&jCX7oxlGh))jB=_z`C z5Rh0#XRUs^u>CWB=epCRnL$TGOv;V~jF^C(yuZK*oAMQ@aosWPtMG0M=Q-c(W6C4L zH}G}S)^}4nlwH3NE+%1 za(HmOov9#7>*n>0}pFEHSWhrC$hc{%TTsk?Zg&arcgis@kJ1A3Ln|g@>N-qCBqa(^nHnlrH z_b=lwkOHGEP^{Cm5yk7X^NpK5h+LlUYiVL6{@vd%p5=H+Ouv1l+vyR}y6c+VDYVa{ z!M4=}8`6mn-MrZOg`Q~$_m{Qz*vwx2ogdJmlK-K}I;WCDPt5R=9dwiSfJJKxkpOfx ze2T1gdGV7*Bg3V5jSm*b2q)UYT9OP2A)<19(mG&O!qQzz!{*U6Qy?%~D*=EE#M{>A zWaWQT+V=qSjBx*V6i1H~_#|{ow)v#`qmJt}@`Nxsm9$p)q7F6JPq7^k}%0(D_$OS#>?!9;oMh$ zGgZD0F0`~$f0Yx|3(3qopr$;dKSOnY7$}hRpWqGp96hofLV&3U-bvA*>TFqqJFlXq ztv&@ob6!@9~{3UXyLfNi)0gWBRkJ~-QE*wHyb zHt5gTQ*QIGZBA+x-0tIGwvk`-6?a?*h^{pZk_g&cExpUARWw{Z8D}eJ@Tuy;HFz;T6=U;RU{Ahf)xM` ze7N#8EOu%aK61(3?w)H+tyz@cTK`z~AR{NCJU zbDN#>mzdvCS^h9(pK@X7V0Qhfk&C*}2~1SYemBn-qtV#OuCJHWm~5XlF~n~;RldzT zYvy|Ujp4Au2}9`Ki90P1A@dT{9(V7lG)jx(mYk{UuyyB9zQbHuYOJg-b5&Wg4lWdb zOqe}!1etdOX$%zrnsY3<609CC(A;03U$HYq&9zJuIbOoM71re+s6WENcCwhCVYd+f=x1)IGV8TyC_XTbAFK~5McT)Y|j zC{BOyv9X?(Z7O<`F(2P2j$TUyt-Q)D%&oere8Kik%`OPp)L!qM0aM20HNw7AHG5d^|MP10FHCV|NuLnLX$N4XN z{4r02p+(NUzmr(MBsCYfeFL#W2hAt`pPg!7<| ztq^K+IDSQf`}b)Vv7a`HJ{z!YQGX$iWE=a@nP#bKpRJM(`7uuu z%(glkPpGfp>`G&B<9rhs0tDy+3e~~3OG9fXaiXr$st3T!xu$C_&!!|Juj z1!bviua5f!VDll0XkRavUtX-OQ`Je+>mH|u=~*|;71(&fL%e66*-^UGp%|4X+h1W`HyQUsM=A`)tV5K)RsmkxobD7}{;HA?RR0)$Ydm(V)|$hU*%+|POLalSF0 zZ+!Rr;~nGqwUg|fz1Etm&1=rNuE{PlgFHk0k&iXLeUFQa#d50cSMZhfMe1rcij(#G zJO71rf4m>{02lMR(;dx$%|1B<(+0{5&d=`A_q@xgPKXdtplW;=ytOO#`To?3ij(F&+?A?YaDh>w8&2$?ERG53U!Q1YmgaKKoneyzdI8mXlayP z^h4pzri( zAnDAA0csWvyeTxP#B%uJa--K3(f&HvFQ3Nc$K@`|Z3BQ^H2+z8^CQAYLKl!672s(^ zO#XTs74p;#E-=$dHJCzrmk%T7M(;yw160ji5rtgv z7=CJ}rKn}6_HyGC#9M~Yxu6_j<7`u4ev=6H)1OvCmdV!p{K?r>0;vSgKI>nXNcl`F z^)~ZVhi4u@i+z#vPr%$>ZGC%x>}c&hOq@JPWE(dd)3vm`MS*_tzv6{Lv0@*NEG9@l z>*T2i88dQsq(td~6EV3Lmd|1Q_y8_gGsDr>ay!TWuwM4sf7cI|W_P6m7H)$uxGd87 z$4TwvUxfnh$^q3cDqK!aCKcl^N1%e%D<#Jw?U1y_XIITS*ec(+NigqRM)DojoV&cG z%dR7n?_UmBM_w2FYW}#?xB?%*LS0{1m3MqkwK%U({O{m{uLM?PKnn4#12sdFZ$_)E$#SPti%rzO6+4>%#Js1bp2dhUVg{()3zZ>=(tx8x*{ z9$TMtttaJaOY^+|bWk~ z^s+TKonP?{?tQ)(srLC$J3zjNkITj+J#X9vt*+%*>%gpjft zD$i~9yM^;<9{f8c3B#6*EXmF+Vw~KZN?)?d4+U7?%MMT@TBvch zV_w(n#M)yaul_H(yR$DM0)=1A?sb-YCN|9%;+aY4;RV+WpEL{KwTMQc-bUVqADk9F z2;*4uDZs$mQy^NN*DvSN#n~x6Ah+=eANtmZr^$}p0d60ZlXht3+T0N(7&@O07}Qq5 zTVw986r3KcO0H_6$3Ltnw$y2k!QH^E)b)!3qmc>(#pE3jr0Iw??=Z%=#b+{FrR=+m zF#)ouC5sl^G!&iGjwwWWw?=<6Gruy}D@D}Ra&zubHXa@L$Z@s>0=8tdyy*pzZ+PP! 
z62E6dq^f?#2Xf&~lj&n@;w&mrII(#x_Bfz6F?XBU0;p%2D*3Zqo-X!gGNgqmb_#Z1 z!B~FgjQq@|w+}fLb787RQWKJ|$szx?8Rx7EnKR7XLnnK@vv~cT`44MfNA4^i>X0A@ zG#0w1J9IhG=8?>awe-YsId*&RX55pH!ti;1V?r3i)F6D&!0AEOy|pMD!=g^E=raiQ zEfp1g{oUCbH+xL(HwTi$rndQqV2@AphFU6H&B`vb`^NoS$3)+>NE~)2j_>&APV{(K zt}nnQE_qGjKO4i?(*T>m1v|z;mk_&!-L)$R&ptne14{OM;Y^Tu2qX9Ce4nG}b^CQH z(#9j7Q;l}u`d(M{e9icQ^XY+f8(nP2yp)R zS9QDCBdmXqSft>#CrMy}aEDW7qpeASYttmf^aHRYxkRMpY~`q_|0~E!Az1j`<>%3~ z%B^?*3bFd20fbt;D%L0tpk%IK|CRR$r_#FD*WTgO%AfL%qSa?1BixLg)~olFU5omv64OuXMM9^4^36N zPGNrwp|5`F?hqyk8^Kl|j6c0j38v3E*RjgR44gqKiN=1p?zA5$1XEZb=iD2QZBalj zth0r--2X=T@6<6W2O5;KQu6xxrgZfr@+0KdAD@)-IS{L`+AR0_JF-EvSqiih`UoS& zefPM^CHz?4_7Iao#rDW;_<^;a2Ld1`H9GSEsE2wo3p(+25#kvo;`8@0+v6W8MdTdi zKoqBIz<8MNw{la_+`f(<`h6mX`SeauSR+mLhi9WK^z#gFGMVW|!?`pMzbHHArfH3? zT8}+)C1^H_v*gbLO0Fr%sc>k_#_s40^xKo%)g9a;j+jhsz#Jj2V(;g?A8;Q6@_|eh z*{HH}z0?HDNp~OJ3pB?YY$UUyX7*!+G1aP=DE-s>_0~!qXn~iWQ<`H5>Zsncy6An! zo&6Cno^2rLd+#5h0}z$N`!7~Nfg@jU@=;hhC497=!q2DD`eTwFp$c6a__dXTXv59{ z{9y-oZV5_<94l^qK7!6vv_3aDJvDjuWbUHhUlE}fe+S|v{m&xQErFwDx6Xd;euIN@ zpN0Tw%WSi1f>$SMgQF$${uNQ4IeTQ3BL2^!(<$tW`i9(qhmN^G>=MMA^JH~ujPJi! zkCI?=NgXxQG{ZKQ3^`l#cDchzy5$XMxs7%k6QY78>bhqNRJ&xQ5WLe3j z>gba+MwMgI;3BS;kiNxb(8N`xCQjhGml(9EV7%J08=hqiFU0{Uyxxb&gQzm^JPI z2-OxG;u8`+6eqXm!w7H3R&%zm?<(2XQiD*9Gie;^wB8XkBe}y8(%XxHHMRUcY*vox zEsgd;G?G5&eKz}tR@T!{jP5UY?oA$!AFN9q^2+Vj#N0+q1@_L%@60dPPP+SW_6hh% zxigaCM`6^NPhS}gf08gg;-b*K|C^qU?;*WLB6@87_Vzca8v9nqvx5d2Za|knp4F>2 zi>km@a5Dx0W}_JCeY>S@&*ujTuFv<-e6{9-rnF%>JJ-};5ZL$C#pvhVvRO5?c{#Kg z_%o-S#RIIr=Cn|T{&s;N%CAD|P-cI3X{MG7u@}42h$81W>d$F2Kl1Jh_GnmmIM!l6?gIHge;h2=4*fnqun##H95YzA_A8CqLkhg6 z+1oEpxv3p%zY?P7?x(qAtQ``DX9lA%K-78V^)|0f>#@{!@L4AMWrX|G0Q(F=24H2- z&)RNSgN#knBWPt~syPpf_e1`EUf?gIg0|-?qtjuPE6O_rXC{C@l)omArTUMW;jmsd zAY%PIZ{_ymxYd!=+g|l)V=+=Jb>=H8Ti2UqYRnVK=Tmb?UPELeij1tZmSCpvi>`F3 za9wW&B=xHla{$@dnFu>dZTh3Gj}PSyBo!?-^&_FqfXzIfew3vIsn_$JWznA)aQ&HD zVmv;&A@}_N`+OX&4l`vuT z8(8zXI6`jjz6cb-5+r$lgxpnVP-{&@^Dax|(Eml6^7QCvXWx4ur(Jter~HuU0@A|p z-g8o=)7V;aS_33g&fj%Aj1Ir)$l@mFs#jPkd4~or(4$~}%q3V3V*WNdi@&qVV*ZE4 z<=uYe#9-|!Wjcynfloc|z9l5zy%(l=Musfh!OGg<=zK?dvazCZwb_(0IH`lfJdm9` z?}DtvQgK(@fvT_bH)qr>VSo&F9oQyt6y{WQdR{cJf=5ucz@J^LgWG>#G@>uQ`ELCb z7JpL4W)vP4pa{JG8}MubAMjuo;1?2C(FMM?giW7E>SrJ^Lat{%_gB7*S&zkyYJu54 zWTgm82#<#U&>y(blSZLj&XNlUW~i}*v-c%1XWQv4HJ_VWUcZB{|I!Si06y;Zd*(H* z+aV8Pd;?w~P2m;}v(R}(r^H*DW6AtN?{;2|l{5pQPdaI!tdgxo(p5&XeV&N54m%Bm zfU}SIthL;Nx}GfO!eqD?275cCpoH7lbsT9H4yH(Mp@B$zEmg zqv3Nod#{3>3(F&u8ansnKIWuoRfmk6oRjnNDcR9)Z-^1jSdY|}AV8;~$g22AR&U=6 z0!#-M+CX}fsTKLUJY7rwU)3tZo#(y79{K_y_!NfSlnlTm7fOl9kLGH>E}e5}29-!V zlzDc(W|xm%hQCetN2Gnwi}iBD4l@nJJ#*bN2Er$I=yn+V9nvbReJwY>p@}N(7lG{y z_NN}Er8UrU`T5%ZqLZx8^ey1PmL2v;5qaG8Un8+V>$cPkUCaA8nN;0Fem!HZG65I) zgYcD*ynXvw*S>Sw0re+aNPxKH^yOdS!iy$eoTs|0uiR1J07@|jePVxFoU>JY`hHS1 zN9czj-6$SxY))rj6~NB?G$JrGo0A~(wbkkKXqk$N?2UU3EmleQ2P(+(L0?QQY+|>h ztx)9)JVEqPK%|aQ;td|Hhi8=tt3{#W=3xr+FWxR#&}D0UD&QG?bEt*h9Bcg&x={9s zf$AxEuLD0(U4k;7*VSAEyQ^UA(ETT`I^2>}E^Hlmo82LN=M*U)-Yb8vrk^uSiN3uejz@SPFo(&foOe`zqOX+W~hm(ard zef;%$7;PdaE7K-bvsNQItkUHHAFNMbHZO7NJHTEM7P2PgFq&Q~Lh7;Pf}!nXEpTZ~`3F-S=u5-?_PR8m+iRMD3AS#*z?7?@ijjjlh=1nJ^!>`4oCKzAnUvJw}{`sLi! 
zq?f-Qm_NgplT_+Nym$73QraZo*ro{L=UwOGTh^_WCWs=mL!vv($2YzIQZuYU$Udxt z%kgQ*q7yU72l&Dld~_T%swm)GM#@c8dsu2|toVjreXHC7;)R z*d=L^X4)rGDQ^G71Gcr&kC^)wT2*a5v+Wixm+$q^fBXLTQ zIA?9$+eQimy-ysI%2OJpX0ww48Qp1jnGfq@qJ)WGmJb6 zzw|6uvOZQ&ctqUY<)ifnyb<}Bf_DcksR3lZ@yCyE&v;v`OuMchJ__976TQjx6Y6D= z&nCZMJ>xo-X&4$LAXAUZ!8|9$jPFzV;;Ax~~7JK6#S_Bo;GYzVGNesqkn(Y>i- z*;#0(k;%K1iSf{f{cQ+iXlQu>bv|Z#WNg>D4x~wV6LLl)rScophhmkY(U~*Tz>Rvc z?!+D?H2(M}T2nuV>>KktaP#@HOR(pen`zaR<`Ovd%Ug|W@TT`R7<2wDVrzFqstrDc&9}j&&l?PKXjDEQMc3tu_O=4FxD|jvA9-Ix?Cpa^7{@s0JX>)sMXd)!2 zy4mV%#hoY_mIYl6wplx`p0IVQrz*?jVI;KzHzG7#olw(vDy$1 zC|1;Kcq@`(X2PedoXn=|ZqLNoT;gttLa&pvOzgvC~*43>1ercxkkZQfH$DAv)sZx;rBa5(2Hsl zsw4GYG8N4y8=QHnEj=nt2ulQcljK-o3|Er2$JHQlA7j$Oe$PG%x-o90+TLQjYe<_ z6m;N@ROsiCx*7s}6UZ|(|IMMG90Ico&JQ$4tKIq46eC zPgffCf?iZ!+04m9+>{v>`0iWAbhs*YuV*Hq^Tj~>J?wk@`xe3c`08g|czSb1g&78p zN`hr0wU<-nZdQg0@76mrXEr>s=leGX_;}9k5w}`HKI!&8Yi%va?Nf(S#pM+1&F^JI zBCa1Wb*nJ9-;~ZN@Mc`aIh1`zTSPdIrp(t|ZVV>uf%YY|r*=8@^Y)q)ELroW&rq@e z9I0PNF9G&hesF^ME2Egff)Zrtnt&O4^MjL5mcdXI+@junmz8zBtW7p{oac`}uD@i& zOt;#$Nda^UW9`6_<9J23=rCY0aVo!G?J$8*JHP0dPA|(@sIn+}3Om{ho>SpAzao1U z7#&1*t-gYGDkmRyuN{_;>Y&8O0Kb%!%;mh&&wtbngg+)fRQ+u(dkmbxr&4&T3BFd{RU|OuH};i%E7DQrR}{gs zo;?ciepyYi3mu0y6#=X}ARg&x@AG?~xE7fqHPt2CVee)~mh$89%ati9MejJIm6Dv% z;iIdQ)G2S%B~7axl@#tgTohqI%WrkvQ*bnw_q>OFHC;ebg3sOucTDTS2b2?cyj0&r zb^5CVY8k{v&LO#hfp00di=(!Bl#G263k~u2M|LDIzdOA)U?1jK=Y%$I96bR^B!^+z z6(ubnLEhj@zgs`Jlsx#-$3~5vCZww2jA28NaFV=8nWo%yLM>rbf&Ah7Gl?(6_XP}& zVIlzn+2<~udh&`o7%NySJL$R(ays7J=~Vk8DUvY?>@o~~(|=47$I{TA%+=sm0OK(j z4W)VZ(N{CfdGQqk7QFlVJ9hJjgnLt%JD}+~*<{DkYouVkO-&D00dibQqRQXavW!19 zHyeO}fPT#D9m=Y8c>4i~6OCmTv)cE!xxUA#?>N7IyLyK+Z*o3=>WRXg7MApD|FqZu zJrHL+<;Xo|eY4tucyVH1KkxfZQqza!0pZx_ah}?<#Q?hkLl&_9kTU8Yi)Tn{+s*qNXAgS!PHZ()`oAx`G~!!zgQ z{2-M56o0D-1B>xmHp7-BKdC2@eYfzvTmC(_$Wnos^iCnSV|#ZMsMKb)+3<|gMIVaZ znKoP*V?JWo-W_VYzP|Rr)==;1m7) z(XwOaDcz^=Zc)zJH5yn%vtJr_IJpInc)##wN1TKeNZ&Vh%TBN5Yuo%Lws(8E!E`L4 zI!z_bz`P+w)Z)aT@IX;{{OLP^)qTok7AGB2g=f{1xYiat(iz}i=qntF=G2>o6Yr6^ zV8B~Fm_`xU|BL?aO{Z~!<;5cfTX8n7qN&;dF*V1=;CG9$s!V1V+bCDP1{T(cc54hVf==hpaJt zJiI%^bgPSJ?{~`C+xS{>gj&|RZL&H$obN2#bTBTliJ58w;NMWlAF6MO>+x~=Gr;GV^`iN zX?1{T_>Dlf$@a{*E~guEFQBsohn<7%i`%|*8~N{<20dCa7{@*_Kh()icAR`e&QAo- zg*eM2JXsMRjI!6rnq*9rrr9)VH&ONL;?Gzr&|)49{$Ns^xhq+x)+5tpi@Mp?7D~v8 z+cLaSap2iW#((iChx=_?ZaJ!}wGN@QUjoS;M$rL*fpf8DKFw#qe5g0bQ$HpqMOtjR zW%p0%&zo^rDm3itjs6m4s^1Zg-@s%(gtJecg0fDEKNAecz3yG9VV)EZr=1jEv0mlD z%KIpBrya!I(V*6+2GKvEoh)8vq}-|@ZVF)M2+8<4!>>5-`w-QGVyl&zV@LIcWBS(Z zJHxD#kTDZP1)OE^jjG!R;>BN^VhC1l4j0aBLV~7>Fb9i{S9d?ynuOzyc_4xJAG4tT zhe{mtQTvDTkNJ25zi$32&qGf0d*9VezP(K5{MFBGdlOlXjMAn^+MQ@Q9g`21y|9z}=1%hkT1T~FejK#e z2;9$SZLv9UT42I9ZJ0G}$IkiXYg^>+W0ugF>c?$)1YtVK!uatm?VGZc7Qhj6oZ4 znol9CpTvdv&le`ek8$#CN8|Jp-xTW+RAqBY9?R(5ziuw!j_keA31jG(r%oKYRZz0^4E>mq;>*O4K=Tc>5bn`eBLrvbv#LDGsKccvE&$xA^e+j8 z^#FUkVFq@DW-ML3(lkkPv1EL6K1yY}-rnk#c_i>zZO^!l*h{cQUdS`B&anH)s*;>2 zN<3g}-`MBz$KW;=vAS@~NBD}hZmO5e+&*J(Aa5{Xrg@~~*+Y-~cXC~b__d79>u@_C z-oioC2lUA&b;|>4Z1dvP(8I3dMCF-m=-o;KmwD>7s+xYUse;bSo1XOrorIE#8``~! 
z250%r>ic#sl{6w!LRUFG&sBaIy(+sQUVRU9^I@QZj%K64@CEVi&5bwD2i2{SF?%!V zynV`P)@kF**^VcH=_c`C)2+F;oAu;S^kY#Yc&v2?>xVk_T4XQ7hsyyV_!a9*Nv{wb z_o<;vAtw*7Pd`n1<)PJ9sXAR>nN>8VMbK%gkkQHQ-hB*7-gJKID4v;X@Lk`kjvVBX zBs1cxJthOj$(i0V^*8P?W%xc70 zdnfid33rz3w;%zPNx~!^ocQSM*O(o?TSP)yT7gBb^#DnsTib~yX^)0}5VA#w(+NS% zn=iU}(}5PN|Guryr-tcMr;oeO3mMR&xWKogrdFd}q zFP1DbCkXg57U%3`NBxfL>w06yb8hOa_ccCTuf$qZOg}zVi($=$5w;a8^^};^S3uO=m^SMGrr3zZk*4r^B$US9cKj*M~4E(^qvy@U$w% zb!~ahJ-hmC`)$vOS;g5XRcnESm`yQ)b2>h7rTqKE0n#Jvwiw46{Bu!%OxuAs*>dD< z^Qy z5#$=_`>|KjL`=31CX4eFfkkD~Ufz+b;=oQ*{jS()>&w_?8(gR>{*(y4zvqpUAo|yh z&G4yWC0NB43uA>k$41_+rT2yauMR}6Q zeEd?ql3(7O(mj+=+rVCluOYa4N}4&ed`t_QEyG6*&x;u$QunYY_F`8DT=ngWwtcPT zZkXnEz`SAs32Le^jZ26jf>H1ASz@eP@9|kh2F_Q3+d9p(RY`Em7MD?B@v&XAh^`gK z$MUOOy1bsXO5~KiPCrEwy`FpcO?jr?9+`bcU7>{0uKNd%BHp6Uc;=M<2)2`;RfD%% ztaX{Qm`TaJA7@%M?Sjd3kMgTQ?o`G27j(YgR)1&yxoE~k=gWv zZ0K7O3wbKaC$#=8ZRR{dk7@c;xg1Eu!OMKTYxBpAgJPSM$n|AXD&2pZaL?`>kO@-| zeIRRYCAYS&;PfS4?OpA*+if8&^`5-pD|CeVw$>9ghc|)g25=z7!Qv70e09Ktn|n;d zvXrLHHuF}G~qLFQdT}yT`Rh)GOjWKjCJ#E#CLXbmov)@#VffnyMuW7L!thce-ggW8YpkO`l!?76pi9edyM zPy~1T{lIr%w#n?Z^S!s#wnvu(H85O*`| z_w-U|_uxY+N&Gww5PWILVkH+}Q%TCew^4~Ox?CEbw9mkXi3fBYU;~4-+J>Nu!EY7>9mO_T5S<+Eb)btwCR^~ z6O!USHQSaR@O^H!t=SY4NK?gUM@Hc$?XAQ?>fcS1(o)tWj(*Kbp*%2e15J2UCG+Td z+J@EEx0^Vs7~G+X-&Aqtdz4z&WHDbRqxvQ2we*L>=dJRQeWLsBx@}q+V`tNnL}f;cnoRxUGQ8mPsja+v_S<$Mj4w zDRQc~&DjE!Q)q2a0T%A{lX4zZQdzbJEgpxmdYDiKMD1B)U~B-*%7F1|>+(!rQjWy= z(beW7W8&#`R_SEvU!dAdMd=8RmQXhh~lQ|c+>iaygOW_eBS>hy!8uSj0&ecw4KFcW~5gfCu> z!F@H!1Ktotj1PI01566we^B{asL^~C>OF57ZQk3*6nTPOfZvxGDfM&e1nR*`V^x9& zy2uajqF|z#v2L(!GLwUh@$_+r4%B^oE~~DgJ9q$iYCwXFfU=WCQN2z;6C{TG0!zSJ zm{uu)G}`z8pmtZVv-#=K2^V!#TWq0Qz(^3hzHC(kRL6@_GhjCSYOSu$Yqq`H_kvOl z{UirG@}Z8#Ba9Uw_FO$mQ$3HG8cS)ak?T?4a{zx0ZoW1p4qB&-0GJuyYSFyzl|R2U z%1)mX!-N8Di$_r;x-TP;_RZ=*4fw0a(^<+e3A+va3mEdpTWxEkXY-M~At%Ld$L&AF zwyry`O87YTEI%8;`fV2>Q=R$Jaza_r6$AZ!XxwCRSi@H1X_iUx3upYx1p!4+PJ=tG z`Hn`UFOpaD#g()vK;b`Tc8anlR^|h{$phe4(M4*IAV-s}GIyGyMk~#KX@^$y6s=YD zj)vHA5IuANKK2F~aQCxT>Cr9(0ENp$ zMh$8|Jt^JAD^6-D6(9yuzpLKuJoS~o7t18OnViZoDBYLNOVXoB2( ze`E|B&;;+x|I(MZsX*!-`gD`UK#1UFphrQ1Tz~Idiq6YR^=FCO{(v@o#{cf{@2MV+ zv-m9uzJhQZ6u>mEg#q00r*eD^xJCHji;Q8I6z8EddW!nbZ6o@QM$xGz+i)E4$We)l z)vpva0I-!l_@{QC0PHY;B-XAmLycZinqc|2c{M2x>=)s!({T+=*G|+@|lt!-sJ@pqjF$6@?WD7iURH7SjHs@$1g#iEi<#hb0%K%Bh z&YuMdN&!vS5vBCZm_ING?;M9FAagD?+0Okrl#CQ8BKnOh#q9x+fYgCUj!IlQ`@e%1 z5cB`tgLlYSBddhzqIl;sqI7+xlx?QdMCgQpeUt|#Dtefa&*hL`dz`T(Pe4uYn$4GB zo77to(Y+sG1G&ggVZ9Tn?3jTsM5+0qkruk>^Xfg|H z9{g69#SxKIir<(6brG`snw(;)`vufGZ3FJ*P-(`}>x#XYNqa~2u0^h0NK96$P1Q@C$=UYvPcL?K zwbStM)$@xV`r2p=W6syAH;U_2MQ@d>S{6A(3YiKA<#9~^>b>~%(8>ogG4B(f8?_v3 z=(;~V+TLhUuoupo)z&y!{Bu36$##avycnmix=6jgk&Q|(@o%!^fx7didb^+S~o1K}y}5|WVOj3#Em z)0!x0TjkI)>1kbPy8Jk#Wi5JbQpK zT-A-YH+c;5)^FQxTNTdaLP%}#6xHok|BxHS#e7V#qyn81$t+o4ChA zpC)c%nS9{>0X@4GvRi(*>L}#`fj;H8B+LZZ`_n??GT2m(q48Yk_o2_|I zAAv!=TTJd)t^14_Lnsrh^{uP4XZooxybrmICVfZW8L?p!Wc2>}?U(iQd6C+`?;X3NLC7&zYrN6Qkycmgq=^k7Y%d0RMZ)Ebh*#ZCtMS{(7h>E;ocZS zxG`;>MVZV5{m5yyUZErw=UWT9fqX-RT(8KmoRE~tWkCt;Hn(AGI~~*zIi%^mOrX;I z_e_-IAw1{6dw(q;w+HPGx$MAleej!trR13&`KFnM#y@`fROsWm7ZX8alCBD0eL5x= z!V;0@Zr_F-8x(-P&mgvmTFDCct{0Qk14!yJLe5_sl!#KBa_`k$h0QX|A5DDj`r}Yn z;pv#2mGAlcAwB0noeH#HN|85o9$paS+4Q=u*gGO$zSF4&z}EmsVxP>-a`^{R^R5Ud zI|p*iUmM|{8}M7pTjn;GuQoR$q>a-Zu35zAY)?D;+Lt^EoD=nDr8=(VpE$4p0W{M?TG-L!7A38Mgb=6Rp=~5=0a4H?Kw)%GVA)Bw0vP zuRDEnz<%wxxC@iYWZXdHjdB{yt4y0DB@*lv1F`ah`#V>scH(XDNmGZ9I_P&yg3X)U{jx0N+ldUEvIZU&9w**o6P;>xfD3&2C+NG*3G{N3t z&^@>0z(QHlO3lh(m?kz8tebZ|x_!HOU$8`Ez*}e0Y#v*&32}v-AMnyN+)DCBHO1}5 
zXPIr1lmeC&70^8>y3x0UzD-zgd#*i6691^taz~-N;ccGa1gAyEh3+RdQQ3B|gS7>I zz>2$}Hi!*(OwL^R3Pp&ni*0tDQ11z}6_GRVf4TT9dPG>i%YWvh5Yiyv-&OY|!8E;M zd#+lxStBX${gvKP99gj7^A?|ca`Tq)?y8zc;Z^zM7Cs5zC{BFYA$Zp*;H1c$Qxd&| zQ#dD{pdI-V_+o$P$vWm_rKz6!wb515d1I>M(XJDRPs%+{BnZI^&MngWc+g+guxv7{ z**zg+ZP>9m2@5r!nPu^D*6}pqGr%OR*FE(>g2?JVCVbYIs#q>O0d^gnn^I-VC)eTa znRL)^3|g8}(w)Rc;Fx>-o*Xa~sDu@V9c*RMI!xaC_{{oEiySHHR~Rb`8ZwZtYY~eH zjP3F3?>`izKr)Ih{M$f%?ZrHKDkI*Pr9qdRbSB8Od!kJ}Cb&MEW#nC$SmiTe`P`!T z>jr#GO%pnDOt1qC~>-+5~{0RKtiXY&_i)YPkaKh+kLbJ649Yffg9+ zTXsDJu7f6G_{tkZG-%p{F#Vxy`i%joA+FhViLjKHH@B=*+mGT&Q|H6<6-l_=RbNbV zFH3$7g5-P&)R^w08ze{$U+51xpW(H-#$81pM^Rn>0y95LT_R&to{XrrU&vUPY|E1vFTVZ>jqM&;NQ>uPCpMuWx#nRdnZy>D!#XiJR~K$AW=m{yb>LK4-Mv1CO+HUSq@0r2R5oZ*B8*hr z=eSm;PPhu{5F{O3aKY*fZEs$FX8t_psv=AKfJ%C|`<;oYN5%S8%ErA};@#Ei(PO%W zvifxpd+X`u>SDF`-IvSvH=>#5<^F9yTu4@A)w=uuOV`ZKHxtptjOfdbl&{^4xBi}M zGI+`ceaIi0IE5^lZgId5M$0GK#`jJltuGMJy@ehz&K;8AmS|eop@h9CnbltJ0BJes zVCb6dA7<~i`X+%cf6>*%`xgxG8pdmeWG zhO7qC9B;&Zypl@P+j`J^m=RnWP-GPuF>RV4X}FBSMua#|B|F%s;yAVmrLgG)8@le` zm?UtX=4eSqW07f5%aj!!`5~&Haj-CtCv55oU-GzwLu5hAX7#q89 zMGLZn-cTL+WYBCH9LvEh~3%o#nXv$|2mqhj(Bx4L=pqkO9~a*QQ)hJ#iz9R7>3Amlxa zwHL@|8dw!=n8#y`qTlGXmUkP%N0rD=W;J;_xX+f%6?pN~gF_rzq)0oP4Ta02 z93JMMC$U*O}FL?{43E7Puu$PKUQe}eu zX3h}^Ki`ndRLHsl1z z7xfP6PQqd}-nYoi=^TuFOkU6V%e48YI+yuH$WFZ((#&q%c3!P6Yb{5Wfyn8ZBoUWti$z%%Jjgx{MGeb^W=8b<9I%`5sZ9`}caqT=se6iSgsJ^zvtiJ9hxzQ+`aw zSVwo!E+^`96n(@V&;wepj=Sn0EXW}K|i8;VHrj@ z`6NZ7y(xZjKJs0ukUhdWQ8S>1fG>5nB=r=u)yePh601|-ghn1b>CQ;?wwW9aZkeZI zKDG@+8@cFe%eC4?x=m(?Hk;U<8XrfRwz)<)9x}O>I(Dl*Tyk6lf6HibGFoFJ|MGSs z2b-!0&!svBzpT7Rff{%g1T>-ab+V*J6Xf!2<6DZd<1>4-BTZIAS-~;s?plG4$wmR> z6y|4Nld9Neg4JTG-Px7=3P*UUCjql#- z8=oVDG_R!jHXg-e{C0&z)tlJHUoj31+UR>9CdR=74hkbGc~1|zF(l52@y1*ymoQqR zYL&c*^^LAZG~}BK9ZE$ed63E0Fe0b7phB)#r!ha!Kn~9<3LTRzexZk>^#Bl9k907U zn8x}5G-2TJ3AH6iJ4ptQrXY~qQXVUx4w1DA4xBw36&xPM2xaoF3j8#F=E#5f-Pk>hd2puwfi¢n5eWgvPc(?t zEzN2frk{|O-z9|g-nl{YzVgvLL60^rHk{f9uuLM^@AoA%*fa@n0* zY@|sZlb&2ny<`(_oKNswyypnlfd|EB*2DOe;w%E9^dbj;JGX$#KR~#* z$kg91zHr>3!{1l5e|DG^{e{*_0Y4G|+JMVHc%T5G(Hwc8fQUT8->ot~7GK(K`Y&5*^`V2r_)h_8N6PT#yEFgMZXVXuFh93Qd(lJptC%B| zyvS*L(Y;lQ$1R|&_3I?^SoYbU*50|^RnQctKqmgbGgjILTJ9s_-ZC|((+w+pEPeFZgoiD@IY<0KP zxErlEC~f$l0xqYn@d%9=(OkpzZyb0ZK9+nAE;W;|^Ag*ehtKZ>A1Hb;Zf?q| z*#_-3GrtNK?7OQj)_%3uoRifhx?5{m_5&S8Mt8@ujC&l?QFgg_I@)U$U-(==y2A#YeR7=_ZS^&PmlY zzNFvKB7RRYN@ch034=8gms3Lz&)A^^MYUu{6pPxE#778scs{dIhrWv(&rx87N*AxT z)^V=u8hnppXKHYAND@jh#RS%v!q{a46ADAk2JgI<34lMA>AxqT>*mxpK3p%}zrFPm zxkqEBeS7OJ>HlHwy`!4S`Zmy+Vbl>tMiG%d$N+*M(mN;!D7_;!BS=XgASEE(21swx zLhl`~t$Wuu?^^diAZwAc_t|Hk{d;~-nYawo z*UpTVwXP`gS=D5U>``1je+K-?L{X{oC#q?CIj_lktJ_4KuB7(68#>QfUU)#F&^l}8 zfiFzOuF<89icaWja9-w&tWQx%@v;91H6}$f;jdajn!S%Wst$Y#{mffzg$r5t^y>>E zk6!$=n)b{l6ILHMw|9x!i@Af`ad{=%qb*9BbA=SDWheXKML8Swne`x{t=$s03;RV} z=*QP-p&Ff22C`M^gf6X%ZJgU&{_g++)6Ov?HI^G(bVJRUD5NGzGi-~wDfKdJ_cnK% zu8~YaCNh~>&HWJIEUTXTAKu7Y+RpC9^Fw#vKTI38B%W-$x?Z}*B>ZIl3FpC48F~uD zXWJb)tL3`8Tf~nSdqUs!Jkeaf0&E1nZa5*SqxIt1u!TQ*>yn{f8>a?GlWO07vZMib z#r5@E?20%C!86u^wc6{QB5=mD7H4wU9(rwI(-vMzFmB+VbJy$Uw5uzxSL!=VQ|c@Q zNi1=ijo%Y%hgpp|GkfTFk7`xQ*Al{02^TzzS=cz8Ju{MaWZHjpJ1+mzEA1>jiz9g(Idr1 zRTf!L zpjbGMeLX-vrk4Y{EWRJSifJ6fTuocN{kL>pBC!;J$nnEpi5wS}k?ENUP?>W_=wD03 zOil!IH=kC3BWq|!W4ai*f{l^4H~5+iE;fFuo$Flj*xn`Ddnt&h3R`R$h1Lb$TzI?V z?-_8nk4!{C0vNs8{EO34?DlnqH47-?Ac>6fG0@h-Z%!Hu+6Is_MHj3GIAI~YlBw@OwNO;-=Hbqdevemw~;a25>kvyAi&aY-=?f;Zb4 z1sK@%?(IfThu1H^f=k9D+Zc*cXmsK{V>_qXMxFr#`NrVj9!#^!aFzDeG8xf4DR0I2 z54wlejxYUFSQ&WPX9~b(^kw4#1vMNth6ltB+u8s#WND}$ZMo=q~JqQ`9UR$>IK$xVZ; zoCzeYbaZpbv1=AQJ4)gs=NCO~j-_Vky@=Kr95T0gjhyycsmblStNJAIntUm0m8=9k 
z66TF!D?0DU8$Woxs}j|HlYZGRD))J% z<`Y@;ySsg+ktQ1Q>Yle0IENt&dX}%V4qR6ph%C9aTr>Q@&@n^He@2%S$28Bd;>$0U zedXkK3yB>iL-g*+g)F>ooVrc@b3k6}e!#`?E-SPHb5?t(_^dzJz`9X+v=!z}FEK5M zmCNaLFLX?_66+K?k3%<{L9Eop@U}m@lP6-f>$9GeuEorgbRg_fQ``e?S!IhvTtpSj zX*HUUXO~1!Nhh0RM(uIKaV=Ns79}cuFG#r8HYg@2#c$<_0wG6_8*?)r&kFo^jMEiB9*Atq5UOR zj_RJYYTGm(B(3j(rTG)9&8*isc)i$gCs>f#GiccrB@o%2gNNbLSPOWzi^ zrMO@&*5V?;<)01u4=iT&As567%^g^BdJm-eE|sIAjoEVcW|dbV@_9M~MUTT3SyOp1 znq6;1{NEJ>+BRh{ciM}N-&Lld(9-+;Nd$UWPKoH)@V%xD_KOGpJq|g1gWT~+3*^;V zIx)~i6k#VV$PpyUo^3>8VKu^pPPB3&9*PvRN2RCPdmZmjv@+qSHt-Rl+PmDl4+p}u z7ac1iact#W&bviP8;mngxnT86E2Z@&&8pJbd>LI@E1qK7-OZ}h-1531gf}ngJ1hS% zMVlXBX6X;TAf0!v5e$}&m1<994~P@;u6MM>m^mQH%~>&*XjxS`flb>Zt*k~|PMgfr z={Pv(iJTfb=u~Z#RgkEm0?_mCavfWm%>!m?)LBYLst**8&*^WXWyLZR@ek1-vxeD_)Z{)s_^ln@FQZA`@T7nOr%oX-T8Ef zB8_qGCKK>Hh3^6)A*zebGk1iPUgZ_I?~@tRkw?n2O42u-8LB6B2$FKl zl34|>3WE5%nv?Y!yr#<+MVyxlF|LXgYL&V>6sSvZF83mFoN$Tr#Sz(>73u+Tc<@Z+>pN{O|P z`)=2@w3C>Aa?yH#WH#-Ouyy{m!&(k@ue1JiZUKGEvNRHbEAK`mC1lGTvLsgJy_pgT zaaAF7kb1w3`w&|DOj`x0EZl0TA;YU|Y564GJV!e~ap@S^qeUq(-j#o*Ts~XfG}(s1 zXT|9}m)~GvMwm=C>G{3QEQ#`QuWfO#4+wucMRJhiVam{ZAz>liBzU;hs-=bcx!O!9 z+(wuZH?(5BZa?&5f|%f8iLjCoPYVp-ocu(2r2H!6TW*2ei(cOKVqX& zI|DY}v!7WnR+mS9QQc`}sV;elR@aB%KN_ri9T)qT z=^BG6dP4s9P#cCdSfX~!$UYbANx2?UL7u7pS!00(zOCSJrN{i!-Pt2P!~KFH$Q(~h zXAeEotqBhAY=19mwDJ{qbF|iOtL5t`im3O!lk0i~aUaNAKhy7(4YThk->4N)7d0$O zPukwz>d4a zSgmfVL#G37PWm`5CUwsLvO<2N-!46ctvM^Kk>Q5h@elD$w9E0UXG0ESel|SIiB&aF zHwaG~daO*R`!M7|jC9>vh5_|4NUW1FwOOa@3+VSQ!%Q{Srjjo07K-+L^ZE9L)g+Ng z9%*Q$HQ(CQWdg%e1QTjDo6be!aE)%4?I&4W-b~yFL+|{N=J5xXYyndc<+c{J!ke9J_L|(w8H*)@kQSnBz!He!BQu5(4 z4P|B_H;FVeD2VFoE=?jPZ^!6MQ@phM55sEDLtIK3g;TU(M=z?EkNO09sW5!kQ3nhnRX)V z>pA$Qc_i53E0}k_QFplL&KEDocDN>X`0;;=!BZHq^DAu}ptw7K{ot?YB_KWh#(w|YNZ=`8JO#~WfJjn*D1NRi+=HCS zr)d<{Rd_$Hvp#(8k&~1FZ{8O0yu=UemPm0)3DoMfsa*x|+SlLlT9-L$046o%{;E!Z z{_y9M`JHJFT-~30h~F zyu=eL&`_f-7iS|{ZlKHGl$x&~0SJqd2L})t{n`%dhys6aW&qi({EqB)hTJ;k#wUM; zQ)_?xBKJQuTtMXzjrx=N4gkmk-+N@=!RgaMd}Htf`dib%e(i_VpW{&OKdx`a4eXA{ z&+-Gunfe;-d-M1LXq>v1{@@>#0!l_r82_4`==3n)C878YAs>)*2?StL0RU)@M&*kC z(0c9^9QrEi22>L?u1wz%;MOz$iW&j;^c&s(#hSk>Q<%MW(PLRd>FWEZI)|HU-%+u@ zX_OSxK|tsDPd!s$4(NY3SZJpLqd)s4J*rw}=Hmt1pE;yVqj-&6lrQ9+FTTL0tNZ)iz&KU|p(*~6 z%j~}y5B?m1yI@~afqr}UX}Dt9j_9LGt(e2tydEc)YrW1E{VuH~$c<6Pe)JI?9n;uJ z;}V_Gf1FwCoPn6u9y)Hak@ogX>U5fI^{_C^`mF_S(3O;|mh<@O3zqc9u4mN>y_$QU z`~8vLSz-$&bV#$ds9j&)WgSj{#}kZU!@6adAK06ab!vojf-3`0Z$vbHxm@d0P*~aa zBGj(&;`ohfQA38TmK*G>@idH2iJK#iJuCx>>-O`B5A|m*!_KT0I@KWWMAC5Ht|FF? 
zyzY_QJ7;wjk}Sg7hV4E>uPR>@vdl;Si6Kx3SCBj=4X-N*Pi{)JZB(z#Ncrj90x2O{ z#7M(}(*`Vgmbp;3mVh=MPKRgN39nJ>Cpzf*mfE`IZB8hQ3&O^}Cu zOa7}ph}m-Cuv4Z9s}G5IWur}(G~^(L26Ha zoXsIVqG<6}k#MAL07|O1xFKxf`c|{!``O%ya<%sBn!E0LX#plSThvOec8(u3;f`)7 zt!kl}it#BvstTPj3nXb{b$b8TF#WBk6&yIs#jc*=7B!jCPV+%`avf0K-;W@KB;ZLP za}-BZ#i3WuLU{2JkHNa!%%#NkK)A8kW#B~hmRnbU@+hCKKB5{P^G^8w6s2~Qj=bB#(`>|~8I zzV12S`d9Ut1)CY_RDpzTltF`}6?}DNwwd!RXf3+r}zqy~_J-Xl&IS2*LTB>zOg*Uh*%1<08Z_j#%coI6c$B z=fFSnco(>~xo3#dvP`vjoLxmXm+VRV$sPq8Fr^#rOfb60LPi_ngH=`*77TnN@a=tp zKSbPS(}h0ENUi3DPTy_}|9K?6q`JZ50dbWj7tuxiPwzLND&7M>er3x{F<74}SVAyY z)m?$2iF&#O>3AP`y$DbBC+|g6)nQX;uvz{kJe*f#icWMole<(v6+Nbv&+i!#ixJP| z{{7r)J^sdpJ>5{m&qXEH@#~Q=Du*&E^voUQ@_g+Dr$)7QjH^mYtvsZvLO&2s7w;2= z`o-JGzWjrxfW<8X!o;0B^GrD*jdvQD(5=ip@6~)#4^x7=M-j@Kdva91>r2QegO%3& zP&T!8{PBYeL??&=-e_@3=+08Qp5OJ~jCaHv%%U!qdR)@2*VX*lObh5hJ;~v{pSJg~ z*wguX=kTUQUTq^A%7q0J36=M!pZ7tFwyLqRG8=423x!W)TfhBSMnR7rc=b1;VICe_ zCoR(~H&pSzi+NV)Sr2+Mr9m{r9efuYEpj%I0#rFjFF0g9j~f>wEzX$qbU?qfHi~K8 z%wQHL@viVtV1z~A8gb7Fr%JUZ@OahXks%4y&=`!CEcn$?E@#w~R?LG%*HD7-O#{Y0 zyD}InlTeiBGw}YkPpry5)@(G1n$#C6El~k|xyr&5L=H4@ppGrEd481TR^ZE#)q+bAD%DRywQ+4SUR- zrb+sBgLWY7-!@yToXV*62)ifF4O#T@11+q&5ce`>;U{hI3mRjpg)MFt{n57&H@%qT zW6Ovm(T3R;GNPlO#RmBL5RwOG1wGRZE{W{08%Qa5Sx8*YtOn44D#f1d8Ea4(ol*^I zCXh-;9$CL$W$i2-EFA}4a!H)V^F9o0vcg!{b{G_X*E2cQ0nf|^+5A5(VQhs8a|*u;$z$35dBdT zkmyP*k>6N{Tw86rYz{GO39C`9(*d&wEs-Sr_4m_!+7yaSNmUxXGlbS?Y{)zBEu zYKgo}>p{ReG2vDqM#xQEt1dRI{|)p#%U@wkpGuR}5?t2pc!5M26$2G`wzsi&5c&%g zPz3gJbDIR@`oL8jcnZrpk~ov&BRNpDb1H+E1xELDg9YT2RyAPS!_NkOml{pI-%l>n zg;Y7~#^dT+33_;WGj`zta`lw8=%eaV*Ua8meCIqQ3i*>_4n-8=$E{P| zcBxe&rAD5V1~u8)*WB)V1dG%u&>YAxFQygg44ox~)Td!~M7khoG4-4XNNb^gg6l~( z-1$Bn{fN3DW0u5kDlNqhnp*TryGy#HDe(-QYJ~#eOB0MFjkrd1i6-*cL4b1A{?Tv7 zbTg_DU;Qt2=0o0TgYFgqLoJF*5iSs+p5qJ1%b}y%aV4XY66@fZyv4>J46%#k+%~&c zDy!ViU7YvIpdmF|k2Vc<(#ne#e2ti*OxeqfVIv(|A^S{&!T49?nuhYFhX`9!969x2 z#Z-$sYe<2dG5*O$j(6H_C^U-xlta$mW4!-5UG_0`cjETG-#^o6PCNgj+tRG@h@r60 z@`e)zKN#yg=A?gSDA`7-`}EEsZBHHd31MKArZ1|G<3!}SPO|CIz0$4J#23nGWG&sR zjA$r!i*14$IXbL1kI0)gAU*gHw2k8SP1#D0)`+Gcu;Ll#q6jI6ZN0jlN=A8)mW}(4 zYsPV-j(!busky^J@|6)%;0w`pFg-_WCq75Ct$y6&tbrBD{OWA(%7|o_A{~ph%ebV_ zWGE!fBsaZNppp_NP>R&loD39SGS$pCh5A^THBnw(AuxXuw=MLE8!KM7Vy<3F%B(!w zHr~ln=h~(j0_}&v7;13~YpYcmb{lmMF;@mg>pR44KS-reCb2IU`o^3WH_%oi&U@u_ z(gKicxTJ>|*X^86_5O}Za#g*jGQqlMhKoVddBsZD1LIL!nd20_*A!qEyS$BpnIG>4 zI)CSQeJjyF9Avg`==EJJ5NKHd$5me$JTunYzq8_%6WqvL*MWHBmNIs0KxEP@F4$a? 
zy#w*wEfJyCU%g^dJ-A!1i%F2S+c|=jZ#U=o={kl`s#Q^6*p_F>Q*W3{u&u-rXAuz zc$*jA2i`46C9J?EFh)9L1C4`K3dZZMkIjY^bNf#EbdX`?0AajfgP(L3@1WftG`>{D zK720*xRv|f1Cb>tYlVL4g88*QQKwMoRHU|hv<*&sb>EM8%PPg#qAp=WK{T{t#W!gZ zct^(kY|(!a705}Ui>9l!aIgI>0Jf~@`Nru`A)uG3p8c<=MLA~<+ARgBI2^4K1dC*$>Hhy(`s%Sm4ir!dqhgN08 zL?x|$jfaY(bzyxV1nc|>0mfVrFh~NqeMmjobP#`~xk52T#blQJv||n=^W^sw12Z!8hg>&Sh>{r6*Hlx# zJm#ADudK&T@xKAs**xh1A7Z@opKG5&Qc*?U7;v_ZfXChc8S-k)NZk|r?@W+jB8t0k z_8Z?c`#XO5_so4SxK(l1K@R5X+fv{!d+Hss8Vuk{Unyja01x#4j@6#}9=_yB&WH{A zbJ|P))@lC>()g8n&Svr*Q5rw{cfh-Uno<6X{>d8P;Qrr%+CZ;qvgzQjf*0g5T^+GPrTN@XH;ML%el{ekE(AMfHQJE$d4}l zcgDrg2cD-{^&*iW+W*R97D3ztdI_g6eK%N-`G*;*(^KPj&zrMS5#OngGe7sTUkI$n zl(>?w3mX9NnjTr^ScqMI#yYM~m&aVJ|0A4-RXP1tQ-FH7q>+6^_N!H#V24MV82RS` zv{&J@2Nz=POUjv}PBn`JJZo)WVVh02JBYu4Sc3%Eddyh3OvUn&lE|oMYT?6|Ta7_1 zB{ha%iI(&YBnRC7ONn*ghFt`kYYI!sthIzYOITN=bY0cnVzKp073QDvH+ahY;N1o@ zk6qVYnJ2JywLQAFDLDW81zga4aykq-?4E|H8Fb*5D1t&_WHrs^YW*OzV0?m9c`ahW z%|y$=d4_(lENADn6LpHlAauMzrDXIrYL6a9h!77#`|*?dTrQAsx!|_S)Un}KUdzOs zog}TQUEEtGO)G~10@a_#@F~=JG++?$%4{91M04o+^I1#i5V40H&x#74?;d^5^aQ>^n5%aj!P*yfkzV9Pk6j zVhjiJF+B&0-wnIW<%x<+%9)E4FfDO~FRkhy=R_?<%^~;;pa~?nU17c@YF_2hosv6) zAcob{CfqgjVCP<;^(!O3_Bnq>O~>l8HygUJN#eHr&b)YiQMDHc>fp%iQ9feT@qERF zR{F+Od$!AXolEiO72**owjce>UcV&v&9yc)MJ_*B)U1ZmVh?$Tegg^if!WpN@-aT> z;-zymYjeayRv(BH7VPcJQhT;%rk(H3QmaXms zGd0ta5)jo$#N(}GqW_{@xOSOQ-|IEzY1~9NKj@CJGBkg}9^Ih0L$8aPBo<9LHyTY& zyQpQTt>@#ATc+-#(O|MyONzAVr4Ee_SFB|+&K*}){x}pp0Ks?Q?9yj6A2N^D_Bk}@ zBe~!eqHRla$@1P3I|&D3+(xe;Z4B8n6jyjIfz){E0QQk;4 z<5A(8q;v_qLv?uX#%p(U;U3?HAh%$IIPDu4OG!w|P$|0Y6Wy=M?oy3fOr~+o%f; z7<6cmVEPWmRl{)ed#k=?h;={u*wKJmkWo&Mv)}$OR3@h3U)leEgXvT4O+3t^5 zb9YTcS&aO^5>upyxaaVrAProF!OT{rap;0I51*r`#qBz%QPfB>oF!$0Tqu^$o$Oa^ zG40>OS`di*A;M5AY(v&|B#Gbe!Lfwy#6uCA?N-dF6c06%<{-4q>y3Dyw+2qN*#V6c zE;%%xKCz*jaWgqAYYemPRqYVM6<+VdItL4^S|VL%q8g2RW`uJNKP*hv2(gMP)NOm9 z{Bfl$c_@Cocoz2uUxiCS3lfRjv-W<8OO7rZ_!O*nBNHQK@_6NSxm}|Ci43qCe-`?x zf%(%eEOOHjE|@+f+-_(c1vY}OoBj@3%LXTGk;+leI^K>WFn2V}Y@AS~A8}d+-4m_| z=nHRbf`*7EC}Cd9(^kO_xuZ|tzG`PpHt5RkjSmazf*pEG*yr+1I46H`kz1{|#ZQ(X zy^0<698+opb)9!!j(3{Z>?I?!#Q9WK>g(HUGB}T6Or`0UapEN^EBXo0b7+JTHx4N_ z7}qrA8~$;&B3$>`XS3mM2Nrk$*hF*pXj#Kuw=U42||*oT|w^ ziRl?_#-7aTVyyuO3a-_4_gc^25dX~C>bv0M=}@1lS+$sQiChd8hd<@l2gUUka&O$L zb*`?RJLu(gb>~lUk{PR3`_SoyvL+q&SMQjw{RAt~utN~)r7P?=O2<3hw+h0~M~Tpt z$CqLzF$_5%RlAkZPy94vPgs-&Dx@3x$!_RswX7g7ph{V1mFSt)C~zCyZGI*{i$8e% z&?kDQw&#&hvvalltRK0mKSok1Wq|Q|0%}QZ5y6Oi*^9rhdMwJ`_b8#Vxd?$WIC%3c zV8`cDBD<+ZHR&0N`b}?mPTe!1{p7qpdZLE!KRhvgSyd|IDER;%4#ATp5Rxm(4 z;BN5-=_x;=2|i%}!D!0SU>xh1m-xP4+_tlr~kDf5# z^d5?yvu_do4UrdXzdZzTF>pjZEPUFk#{`uB_ca;H72P$l_pkyh zKoxsGEW=2B^wMi_?%^S*w_{M0-=W%O@O{JI96+w@{G9+o z6e-7Vuk(w{4N$p}c#Z#D2A!dJBO6>)wktdZX$d@i!+;ax_r7?AL8@Fpa{rwW6^SzX z9j)uGjXbPJ!Cm=HN6b&Po{nKqjqDE9 zTU^}9x{IT6wBik#vu{#|9xdjMce?$2+{jfFu@as>a=+vC_+F*QTfRmG;2)%5JrIrt zj_PmE)Kp;JVw1+k8}`mI>EVnh*GHc;NMLj9L_yLw5F5r9^&Sp}0IR&XwO2~v_DbI= zYG8RMMTDL>tg8#u&+dg#o?D$A2-eY=)QuFLR(Tv~@OFvU8#6Lh-2E0M-$=m2^oXcv zY?d)2`s1k$!9Q#k@3g`+!|J?pEf&LyT%|xmZh~?bCcB}b;I1n$gmWlVBd*uc)}f<- z{jli04>v-neOhzX$$}1d9f@S0p?w3z@zEwUM0JRFGRWsX8UUWeW}T)M|4lkvXOnTY z*K6^szFt{pzyS3!|H?Naul07*L0I+`jX-4vnU6N#W5cL%)jCigr{Rg2vkOZ zb**g@Vv_AB_a(UfgdIS+V@TG@h!s6XZ`nSt9d;0VTg;5J2{+1T@`2}Z1S{m@9z4O=bLE(Y5%Z07(+{wKUa$4Fjk&#KN`u_hoSv$wykcs1wQwB;GP#OKN)t+m5@Lya^FZ`v;`PIerPbwB%3Siv) ztBvX3Ha&s`nmX;Mw-rSGU7MPeoNs`T`d51d9^i`sUE9}k4G?~!&%P&pRi=E6DEJ-F zzIn0z#nn{subAIAMB|jHv;^FZk1&wbz$r{Eu9b8w!~9Jg+4hx|0@#rb{BIRFJQdIJ z7i-$Dd+NVaifm8KV9o!O^#WzF=Si#II-u0ZmE->JjmcSR;J558jIS;AKlGk||BxmY zMUOw#0Ag9>4W<(bW$7J-0wC)6!o+TX-FITv-@cY_HKlStI0YrQ7eeOzGvJ;7wPn3{ 
zWIDKY*1NdJzV3SKw1~uiZDmL;)|hcKWpFRn0I1A7>AyC0Ypt$WRsj86+By&U=fMC? zaD+>hGNH?scZ4nX072_+ic=0FaxfJ!=Z7is=*DQ2`Xn!{eq2BZ#NcWlhZ<|%G;757 ztbhwzEg9P?UinUoN#9)O^3LOyAyC~oN-d*pci(qSzBB{Ptu#Vt9d9a!*460`S*UV= ztfkX07q7@=5`(T_A-p*od)uNn=e94R& zoK^ky+WI2+wJ%_@Gn@0t3?@4(|HTVaB(|KL9?7_hs9@^Mz*%otEO<%;uo0%kiO+5k zy;`o;YR5F)(z--?lLaDsU+dm@luJB>U>!=QbHl3nO*rFz2%w+|I(=?G~Kb zZv@|ZfSi6H-C8*2Y+WWGx?Gz1A$?WUYEx^qZg6O&T1riJ@l4FFzlFVl_1N{SXNy;8 z#7l39kNiS(9CGxdjeuQ6J@d?af%x*HruggS;+oEFXs6RCroVbkQFL-z!@8Iq zFvk4k6J+0*l~EoVLHeZ~!wN~IBgFWRcEuI9(K%y*mbpPO)CY(5Eph0$b8V`n3B5X< zTbT~t5=*1S%tJO&pL0uU1sd7XE@yDM$hADc3=1`4r|EGo{)wG5ekgX3<^!c@({5*7IH;4&$U;Lz|6ca~Jhh z2bV2Wi1l`b!e2seOA?I5uUDnYyvq@;>vfIj1GGOidz-`6cXYiT4P024clAm2OrEK5 zwMw%~yuyD!zPLFt=0N6Eu&92yYw^RSitxhqDNzC23my~^PHsfk zz`5YL9qh4YrTPHDh!-u)yX4On)!WL6bu8X@Td1ir(iER(32>IuT%@dx*N+3mZLgUY zYPfZ?`nj*3+1Y>Ftv!7G6&Kqju2$oCe?638t#`cYz?grWaJlhtNW*|1bww(_&>JaO zdF#;T_s44Yc7yz*)<>!vT)kU<+i_}*w0!NnJeT%rIWKCt_Hc^pi=LbvVd^GSwkfNhkppFS4R^g2=2Z#mJ*;cZnP&eshMsLy_8 z^6`Ds3uY$!3#c}3+ZB+=1@Mtjbv|+few$BiIs2H#LbOHQTJlaQYtwY4D#1^}V5sDf zD-ip^LChpM6J(2B#&l>=DQ~G`i~f|1M9ripkAM- z!j4K6K}Ct(Y=sz=fiGfSGPXCb?w>nz_W0q8?zWLaHd#ILmn-z5Z%7}^Mg01*4IyrSFazZGkMFI!Nr#-x9%6j?BTo8h!4&c3o4JRTpd$b6I#D=} zS&;z|HxMv(bBn~B9Z-C%!Li!N4eH_-t4-7QGyZrNwhmV@wRpmO!~hFF?z($0bRLR! zA5oriXs5VN?^?>#>FoU8Lb7DuOr##g*J59)P;tqNGIr*+IV(r3@Kf023P3@qu2-`d zvWCT*NDHb{to);}X?7#$0bxGwcg8^>t{MDE$akvvHy2>%?E7YmQ;_n{Il}RQ+KUt+P% zd=k_RHNNYqohdK*np%XP)G3+8OK(aw;I0{Zh}cpf@iA1aRJ&`XLThd2Ur^-tQd3ur zb+Su8MiZeiRDetR6*u{IljaI&tmV%YG*WFI3vk&qx2qSAEf&44kpX2@+XundTztut zTMKoXsESH;A?;QJGReD`hB2cEklJaH?oeEPS`?x*XNdD{^uEsZKB+b9`=il>rktIt zPRW^{6c&94aoA?I+KM+EGl|-4I?i_Ct11CPPke#~4^}Z};x{U4Wi$4>BBZs1aluW_ zg$34;tJsQ9jwg`2Q#|E$t89l4`}Z_(%lv0>T`SpRK#Y?t_Y^njK?|Jr5UT(78OZti zg`hgvxZN@XvAMsp@OqQ(PAnr9x^B)Tsyu1JKGNu7Ywnj&XzK~9Hl$74Va=Phjj6D z3t)rR?^kp$*dE3VesR^1V zQWPmtTiW#S&sAY?3HRasvFJrp>)@;Q$+5w5T!P=)bj3Q`<0B^u)mC_&~V*k#QuePNs#8%Pi$ekVsAYJe}K# zeP-G!K*OBFOQ{{zSa(y(Th*y+X+zcy`^{h(7h&J_u>6}2B}7RR-f~m71x@Wr-x69LF(Wv%Gx*OOPE_&P+9P-m9#AQ-_ zsl{MUDmQc}bG*VvB%{U+y--sgz+}%poj2R43;RtBOn);BsBw7QE>i_$u%q|n90Rls z=@c04(;<0^GeX1KCHJ)rK(@cac*td~l-Od&Cz;+i?xGjnl%J|OKH7e+l7TxyoMDUM? 
z7K^KtM0KmBIug&#@oB*&3ay*XZWSSR%S*3bog63%w&>yXbQ_Y0GSo-pF^Fd zJtgg78PhrAE*4_oiE96W;OL`jAhAaf|InEXBr1MUVVPGj=p<|uUG1v#uua1@1ry4* z*y*Z0DftS>=tB?>*#1|>i)*m*}+NMg3 zhg$X%`*-}aIq=LtNMDU+e;gL3LF=wgQtWzBi$-{ zlaJC>R_sG0$42Rb(HJ&5a@_320U)@Etolm4tZhZ1di9UaNx~(|VlQMgpt6*J@CP@k z(VRvuZPcEaZ}{BnKXx&GDczV*)IikeKQ>f4ABvUk=pX!&dQh6J*sRU{ldgA8B&8MK z3*<44wx2gGT++TO8rd-rYzvj`cU6c|g^^UHZ;{=`=1d+nAGz}*UXa^~KP=QhAqN2` z$|pn+uWZ;fV#;X!3t#a^mYqBVrXv{JOtur0ap_0u zWsg5)5;SaAL{{eo#xTmz6EIUzvD*qHlM0-w_)fD-JA1y0D~nE{WB@V-qGHdF^+fAQasJRIE7Ri1-tX!|lb%~$M0#Hi0{*(jb=+Snls^bYjA3Qns(4tPf)UbPN1n134D0oA~_z$ ze1LT&$fya_;p}h3hop8wn+g5+H@L|h_Uy7c+%F%EHibj&4~YS-W8`qyp75Tet71*rVm4TePOxUU?^EN=Z-~+J-_@`~H40&>Xjs6E!?C1X=D}4qqPK5!z?)LMw zu0)jHEki|h`oMh`T}!pldq7T>uuOn_!i3ngc6YBH5F==7opez2qzAu%*zA$$BLDa@|$N3ofETvkl~fWhOwDqi(~1#x<5r-5Mqf$8++KPZNqX}iIU zK*iwop7?H|rm1(|`;$*E7_RZj3MbIBA+pG0a;SEVT|(LNP%*jtH~5sAh9{o;7b9C& z)8ge%!~?G$`9@s9kV5R!JPZ9^7vT)+#sR28w_mE>%O4V8%p#99R(-1R(@hKVi?QWo zOTU;r3z-?T#|m7>G>;kr;>&mWTi13?DtE7(sm+7?{KQGTmz9k?&w;b9mPoe}W2`yE z=5k(mw#jBJLWpkcNhR`;-U)=Pak)d@&1dT6-MkHaD<-bsuB|uigB!+{_ ze98y{TEMC}NbHL2h`pR@7f2XsRryC4^WU?azi4YUQFbddws`fa=K=y8q%`R=UU${H z^*lS;Yq0$|mcdoP_=NvnLa$N&oUs~XM%)C*`)k zLo$qPy)!A2j8E0|ie%?I`QESko1VLfH6!W6imSq0SLp?hvE4hH3Ik3VXcd7oQ_2Oo(utza+Jh z^0%(xAHI6YAG)2&%>Sn z)3ed2yKJZ3GP#e!WXRay6EfBi-TpmXk@0WXV7}>y{|qWT=mn2L6s5|4B59B+uI>aD zRoVqxrLopb8E9l+i|?;;1{+{@V<Lh49X9M%@$on~|Vl@|SB9Hw+h8Z(rWJh|zfg ze-??@x6f&fkQb|cbG38yQ>K`f2uv62sAktKrhI0Yd7D_HL%MsJc!?o*SXfev-Y>)c zlgWLzX@;7GfOHk;E~i*S-A(M3Jd>1|xpA?H$vE`njM5&5CsW!h4n70C0@xv+7)k5? zng0Q2Avc%dF{i5x)xwr?p)t{fPpnd@m{i!xVt)hwpzOuSJ}(ib0uCg%=7>hTDpxB5 zoi**Uuf3s`QH)|Vj!Pc-!sun*varmWnXV%m(YbtW^kDDix}P>(7K13Gf4Yjz(eueY z7gMKl1_IS);!Ke7&AE5_+7&Z*j(wfjw0Wd%Eb`7 z={fJq(R>0G%tqh}Yfe&^9K2Vo4}S0~)}ti99IVSCrr_l7Qsi`*x-*46)L1uNR%x0G zK6GE8qTf>5hIQCiR)2chwl1S!k6(gaj7C{EgHp42i+@QQ@^z!y5NGcCM3SPY7R;{680H=s7y{95avcA9^<3Zg9ty z%(5jWrD=Pn@w{uKD`MxucYaWHG$2#OFGZ&{{y*%!c{r4P`#*lSdP;J)QPx&Up{!Yk zq>>maq^u#kF)`T>zf%NYAG!%V^q#+Ym~81%ik_xpK%&+++u zzt8tLzQ^zQ{O7nX*EQF5zL)EKz0UJ|osL<`*(SsUcdA40w zzE9|&Qn=^%i!_j6Pp`epXFh&dPA=N6`(53m0&PX1Ac6+A_0QtvA zYtVaI4WOpC=i{Q1H2OKcOerkYAyj<3Xi+URCQLdrm&?}k8`?c7*290#^bh-T9Y*3s zhSJZSEhJth&Vrz<*#~CU$Car8*V!GKszE8A4=msP%-xkWXKecd+j`6@Q}^v6Vg>{h z2hW8aPx%Pw+6d2x@2qB>+8-(pXfLYjfQ3AyyFeel&4fs9nvrsk^YU1&>pxDaTCA?A z(Z44s)|7h$pW6C3!find0aW_=K{(Sm;iN^35`KNLhIjmep0Evu`Tnjac%Hrb(yBx1^p+?L{B= znFSF5O1@6t&s_Vy-m&(*X$xX=VV3uz)3m87cL4%OtmbHqY+flRK*-i9)k7cymxQ`s)-wSj}Lq~YJuD}#q+;zfL>FW!wEH-hf8&Z zdJ0K81108a!Fj9o&57@@DCaO$-ANyz>z2?@FP5|fT#DXvo_?TR6cawA9B%8C6g=gG zK3>MQl!X;&on*9e$0aO~aIMX~J^M_hT?-t)9m5m_nV9v%$nnU~>#zeUO-5!Hr=@rL zyRU-mM{hD=oW7{WK6ub&Wsoqe^K5<_GW&b9=FpzmrC6S!^Zaee zfZO8nxqa6(y^xjS@ms7*)ug=JS2qP|%WY}uVc*_sPhWaZzGIk@rSlB(W$iDi56A5_ zO8q7yWvr&qw56Dx)v$SOMD`KOL-GUR`9fsngzCj3Ai)!El$q#P!EM9OF5bAC)1*V!3s2yYN`ntuh7gL~b8Bx3Z;xef-8NY2p)mX5#S%nL zU34q2p{l`#BLJ}`)WnKpAdM)gKp6x~v+8=lc)d571X4J5SYUtR=Kqh%jO8ZMnt{ zQ&)&N7ad)uo*8!-C0oj7pc)Y0b%Md|QmFHIt4wpMiH2RtoJ!mb)$NfD2=ukKz zev5wq*xvIXrBa4a7{Rb6d;G2{jPx%067kGh;zp=&I9SB$zMpq7NpK4aUVRyQ{jbGNkt4=7> zgAe>^>@yXuY3`rdtJDj#8fO@1wjn#O6sWZ1osxDQLi&7SgDQLK>iw1+2Bed*Z zmQEw`%;L$VvvRII_ZY%l0rZ+g0N#@9HW&b&+NZiG10`3+)>_`_8J>wr4L>~E%>@-V zdR$)6b725UhGzMi&~vWERTNvih_A~cw>HYQVN&O+v|xb_-sl95!yg=5-A^l?V=Ofy zUhtzLGOWT4Y*cZc97L zg7d_W$q5+AA&|O2`;|_@%OB@S9I7Qe~Gc!Ph6BW$#$Cn=mnx12yyN2GTcFX_hr<*RQfQR}doh zHeTXFe(r1&)A;^rX+#et=+P;Ry0CPIbk1oYQ&N5Fwbj#()aN*JD$WQjk;h%TZS3jV ze9eO+LFu97s}-n(<;V~O9jVcCq7dqPou$Dm7yU~!##E3)S}KJY5;c4u)icVZs0a#_ zpW5v{u6r>QlYiZdiOMbyS#_iaW?NAGvumz+34dxr`@edd=MQN6Sw1v%6gS&TP+{Yo 
z$z~9`e^R`Nv@qI@T|X^TZcE4#nKEtg4}|Oa^-&b;*fT zHT0Gq9Od#p)4KaxPM@9D+QPnp+@Z@>PisY~Yc0P`>|Xi8?+#wgFA`iYsR zL2n>|@iYOQXoxaxFNA6rP}5J-s0E|IN`sI1vDi}jkQ|m8r9J`va^1`N0iiv3Pytgq zprJTl+lyV8?{Dzeajo#eeF;KASbhP~?2M1r^9t`|vBdfP?$pQ{Rs&ZpIUt7L2UPOX zYu^QZbhNhDUh>N_;(mY1MvSmQz9M~dqrPNQN3C!_LWKcdJvdSX16g84EoNHX&y%39 zDVEAp$-!$<)gsh`G;B?wwHK^p1ixG28Dv*h#lYh1!rC5{EI}QPY0*gaCpAe@$L3X$ zYfm9<(8<9%&@AYyxG@*k)A$YBE)|r?k~D5-E+&0ewkIvUCum%0{dmApd~xnKcXwS8 z?Q8`sfA+d4AUE7K8!@06tRmJ&ag`)}@&_8%EGWi6PRET}VMs$quu)%+-GQ01znrsJ zWHw-f*0+XiF^M}Gg7N7Oouosl-0!G{c~mD`4s z92!U0%A~)BTn{sSIIR0=a(?S4>^S?jA|-qel^W@f93-5?PH^U4*EQth>OGmf-Dx&f`dZqsFal`HxO!fhnJ`DZx zH>mBTwN4%X_sLqJ&+d-Er7whSe$iXQ0%Ym8^FYe5SJZlm?|+*=&EKQ}UcYqq;a|Wt zkbcVk-{S*S4i`QHF1?`h7w2(3typrslKQ{TEo>iES-q*l) z*X;=43!n$gztER^_|GbfT|XgYu=*xjmVW;8e!Whh`>)^VzfUxZz3Bb~TzXjg|NTi= z{Y$AbpmzC(fZU%FrvH!UB=7sEibxMo@vHcvXQR>#s32u?IP^6^;KPWA&itjx>HqnZ zJxu@#SOJmDGeu*v$cd&_xk3mOT4H~G&p$kXD(@Q6@3djM?n=l>>)$J;|4+9C%e9S4 zev;|U?u~*R$8(Q$tDX|9gn#$}FS-&UoASDGJ^eh&5btZ%{Yhw>wTydC{bepK-s6M5 zO25hlSgbz<3KezVEnTuBG7X^;3{=TRkDssz7Iv?0md7evOPa7_-(_OAr4eUqg(G(O z4&3_0eC{XQoqjTb;FrdD%$1_2`P-H=PL|@tP$jDK&+MY0#;8JLu$^a3fID`elCv!& z%ikL=Z#LOK6v_Q5KrdwEWN}$6@9Z#1Cw9;d>g33gbik@XyCBByQ;~MX zp6RHW?&$l3{Yak$Tzf$V)!SN}t)kxf_;dmIxIkKzTh%~iDG66x=Pm-3$|m-i=!b-S zd$Du5-MHzkjIRR)swL@PN?$2eb|ip{VIinxd9N;LsP4on_r|z-7~DKC-k4vS^Zn&y zjc&33NEQ23B2!J4&MMOjE@=m|a3WDTLqjzQ3xifYb#@2R^4sfE(R5+^&8_YwPO<+2 z$b=r(BkYpwV$E!k*H3e1sU?5B1~*U{xYy=jWE&<=jk0QV%NcNX!y%`v$5WICgA4io z)Eib_YA)|gFzhIrS=x_rH9ok~(7`x7h>ge}DVxE{g>SQxal;!@xEEUeyL`x+Ac{|f z{D@4`P+=F%X{OT1w$3~o%Z5scz-+C*yePI1$?|%s?3~*2H4__jPB|DlH|ds?kPQk`>KiA&cXz3IHDf&*@^PT16w<1Z$w4Atc_$k#_@}>uZSk1apnD6h_)1mQKd6og30<~LCcd)7~XeP-bQl!Y|3#$Uu>h76T`ggD6NBf!rH`cpX z`H7zKzW9UR+B`P5-#5E%qsZ`^ub#G56+P?H6VGnOcvN^z@k-sQTx-k89gW|t9+>gs zDhsN+#(Lkwe8D}HuiC1{A>X=G;w>&KBn{lL)n)!HnJln?O_<)pu6t5n5auHZR2M3f zu3#s0yCdfP9iA^+H+3udhm+bM;c0IPysVowaD5wDm-8`Pm$xX^r=fim{%(En5sEka zEE=~>7|l}|oilNPRUpJCQoFd2K3PcpF*aPfqUu_?CbVLy@iy;8A3cF_zI3P40FvR& zsd&pzxr}Na!k?TDX&eaGQk1ZST^j)v5twK5hUkqto;xMYj?p8Cvwq1QBS%nF;la3t zt7i5s664C+1kdrcqSz*HPeHiXQmF7G;Gz%%0=2Il4KbHYJFw2DsF(w>0MnmXtw8;7 z(;`c!LJ&w#_iX`cZgwtMZp7b=XOp`r1<=oZVa5U^wa;u&uI`6Rs}_9p_t4lfd&L;^ zV?_*;UE5!9I>)xq9K65SMik9=H1cwd6STxMUxl(|DWq9#k+bJbWyt zUus+@$8|E)mCosGHX8B6Y$HuroYm7J1pw>!#(d=D6ca5w6;kMEmLcBxlm8T0KLpLX zn*M;a|8oHo2lNvOq6tWl+Gkr_b%t#Wu!kCssPtej{Bk)e~Pp7K1hLk z_bRSlDna>AeC>_SsXG^{*V_(TE^5@Mh+WE#TSd^Xo*K$A{jfS~f{7}Q@2Q(#8Z5Ju z?`((xxKC=QjOWN@f%%DsV2ZzSzu()sa?CgH_;$N8F^bk^rg=vQMAM}=!E-JwBx?$H zKUq^f2VNCsz``Z$t)ip(h8fgqL&6Lz_YJ=bO`qf|B6E^R#Qv})nIM#a@-o;pXV~dI zU1zGhFr_hUjvhCW)?9_zuK2l+nNuHVN#R5(4o_Xm_(+S#z5d5~wDymX`AeY_3**I@Eg!zR#4Lpx~g)OATq8WwG!GwnT4 zfMvgKnY>vx64v*A5vE}#f@~XCPu1UM+iYAp4JFl<4!|2~EvW@@ ziX?+TyAQf^3;3P14s5fJ;1UAN2-aNoOJ_%rKxNa!EyJ{_IwGflIx8)nYJ)>DA6sp9 zn3P@&Mo!HqrFF0VG_Ypy=fba#pBq(U&8NE7jO<#~NTSCiqbR|g7M881XrD-sWF4Mm zY{RP!EMZD%hzp-j4F86QBr2zW5bg1L?^119T}HeiW88PCPENFLqUIbiaV)`;y((Zw zCzA^ofcB3d@NKKm8Ut6RfQ6ZUoT?3ayWLzZ#dq3`kH>PhP49DnWHRNFYu6x3 zmZzT5Ubm}@hy`HQGUWyfogB_c*168>v{r|gQ*b!DOo7r~FV_swNJJKzC(*Y2_S6GC zunh0;oSyUCPtAeP`xBk&)@~B(Z!#`uV(~B!b3S9HG87pmAUBVVq4I(fIp+@XKn0Dt zM{6$*<9%lU&Y*cP+#yh6RDf(hnFO>a+&_r|ym_EM;ppi?4Tg77XThT$U7efIFE{yt zUoP|_xlrY_-mO1nr7RW>md)uTW-Bq1`wL(*)J&|RDLb{lEEoRPCt?NBOK~c#BYeH& z6(6jC=EgDw97Cu-9{>p+|D?u?`dDw>^k)|HF#;xjU ze2K)DajwRh%8r1!CPabF(rd4q4VpJ&5A$_!NNk3BnzL-_5cO?y(~MBsa=8N;W8 zS;@+S6LX_jC+Shh(6CW3Cjkcvc{S+&X|&SYZ_qIb))z0Xn4nx*^1Vfcno)bLeOue? 
zJ0nS6M6*vk?RdNv-=rNO=@EBx_$=w`3IjpDxBCFg(B(r(c=P&~`_4K+fN9CUspYzI zKR*<@H8_d-{^x=r%kL%{_%tnFYqT+~k3-1T-OQLfO2ZOtrkQxWU^hS7VW8bBbSb!k zxNV78Qq*i?8inHHvX`pnEMdD3wDMdC>KdE`%4>Qh0URfNvk5XT*nK-siH+H{w)~q7 z*15nhs09i0psV&FD%J*B?=w8uO@ygu?AkcH?h`0a+RDp8+#NM3{}ZtIfsma-ou1tb zgQlpYfuQ<(V3i`@PC-IY!H91A*^uuJRXR(w4>ahk<^!y!%i2Dz9NF%<`KBqcCrZAv z^T5})lt4y$dP8b{TxSAYSfA#IKx)iArCYWhQcm1;8bF|Bv^>pStCJD{UVhAcJkUp2 z@&pkX;-0F_3+yq=+1vh63KQU0e&K7`KLpfizoMRqqTq>)C*{+lA*wFC0H@KkN#j2D zp|#u@yQG=d9;V!Z%aal(pO~mhx$yB-a6s?^RB%UCN_z8Y6~=dPD13|Omudpym0X^4 zSg@7^1<2*{wfY0r0fnqHmH|3Bo1wpS2>hF{_rK#B z{;NMR%eNva{#GFSYlrNAkHNw1=nbv+^^O6be`{j|q)oX9ZQ%CbVg2bWQ=o%De&M0@ z76)OPzm{73-W>Daf-v8LC%+ZRUijx?;y0l3)g@EHPr=xiY}m`e$`X`Z!O9HGX@9B5YtfKfR3cSq(5oPu{B)& zSvu|CFe1&VBEJ>*Zdv+s0{wbBUGyI$3()^`?nZsm=GhriHKG8XNMSnxS`mL&^ZGZy zK1o^r_oDRKKY!LY>bd{mua+G(AhmiRPMAF#`A^z)|B1o*{}iM8dkchrLrncw7sYz1 zDEYVa_TL*1|2qQczdXmjqZUR@jao;!B|5PGE5ESNkcDWp=QP{Tox(W(^%vHI4*20M z@BO4YTdY-)|FFB^%-M5UF3ye9pmQQqGFqjmxk1{)5xx2~slOdT(0=pv_9KrPZ#QPu z(i~PtqFY=w=UbmmmqF61BwnX0bEM3Sw1>M;9-4kFLRRH=@TD43{4Tab(+QCD-GT9! zaZjA)gSp7T059K6t)7G0M?+el^@rJ18jel&>&rC74JeG~YjW?^?||=(V~^K$wL^^h zc?)y;a+dIZS}>SOdd{xl=(?|VdDV!JAlJZMfge=NLkk+a1HCu7zPA^lNp$CH-f{^6 ze-~2ihq_t!^O{SJ$vv~VVL{&m7LhJqE!2(5EWMFGnG&eMYh_~7b$5?X;y@JnfxP!W z?;zrJv*9`R%QcCsYIAFkO`gDT+scriL-fTzVAVY&fM)2knS$o_2iwzAOF>|+b#tnw z`bfDe5Rtx=!S9O!Rkyo2Y2&weaL(UoQp08xhm}01UH44ab`gNakC$o&gcG;1cQVz? zQY5-Ng$vQ&yZWVS>&QXI`7qDjc1g#+=(yk0!xZt}^>NS#7K%g^vJGb(=fzS5l| zB~bBVZBeF8-B618R8@{@5`9RP=dPm{MV z94J;*RoCcdD4f2SB{`H?GXQ$S!wq1y)&G%Z6OhHaCP_8z{5C@5>H>WJPrd7Y50Ee{ z_LW6eu?oHtrHCui4D_mw)9DE=8%tZlH9a_j#%)C*^H;C66+IIbH%D?ocK2*fyBy8H zX_z_pv;o2|p&2%$1Kc{RG{ouWsc)afQT>r!u!~wG^Y&J^8uLT%%$?-`5Q2f)W%bQ| zf$;Cam0F2-tsFCl*xC~>YF*q{Gv_FC!+y)8(fxI)>G`*>c-xc?<#c^H1;O{{x4vXQ z2wWYag`OzeH@6&|JV`o;LJ6*F^gXzu=-o7FbDs0gGhSjPvr&13uN|;NInInj7x6DF z@OJT@z2I*F@$a#|@Qk{oUC-+iNJ{kAcw4%ZqISN%1N}ymLa+Z^kn};yVqIE6b~U!x z)SaU--e#HM!Fx7Pdv-82l;ki>++lkUea_EAn?fB8xzF?8-DeFuf;Uj@Nt6)Ij4))) zs;Lq@&G>*eHm5$Ft{dM|#>ma4aNp3^q z1fVLgoA>n$m0!ffDHlH-ypD#vY>e*xG)WeGV_$YD`oj*kB7+xRSMW(u?(q(iMi}fe zN?@-!L{@Wg%qn!ZvVA})_#d^sJ2cJtNtqRPKKY=+8k4a0%VTG@X%?Qk^*)*w{wssuDK6OFXI$WW^S3$-iH!)!}_Vt32FkDd+WoX%i z`FVf%*&y$bE_^ss=eP&N=xLK(8NN%#S4h$~B#&j4{$uQZ+Ebue>5^H3RD!W3>;SbcJ^i~8Es zFSlpHDu1Yu=}3U6K1PKgi{c);=cb3YA7qDW40rmdW`F(dZwHfF;7nR@fcz2?)2 zu>MOhC2CWQMz|Q26|qB3rTKn^P%}gzW)zaXS>1fUv4H9|U-m}5h` zCo{M*66vj^G_0rRo9XiE$wF0kA4fg1%I+)MAkF*P_1c%pVaxu;g=zM?(6OC1A;vX@ zfp6z)Yi|Ht;HtB){Xh>EXnL>pVbS}gEKVr7)u|^3yZXiODRmOo+%r>mV{T7o$~V$( zzdBUJtWQ8pAa&~#aC?=KlK!@tZN*{$0ir-93?0m@ut?Jm>lu~moU1{z3n*$-zq!!% zUb3G47S%|y@}lR4B-<~^?Qb0dN7AmC-_i*sfRp6Ubz`PIrtLgK`?a6lP6}Q7qa9dd~LfX$AtGi_==i06o*n$Tmu-y) zy)oo5z2uJ9t{d6crjw3XbAxh*(j`2@bT3UROT~+Xnd+I-ocUrL*A$r+ zVaPf3Z6%Jr?+e+40eT%sFmx)7z@dyB=)eXO0pHIGaSeoQVSNGi3iilWRqZE;^_Gk` zHX<&GD1`q=%nLwu4(@e-=I$eg$Fke|Z%@acEnjxnY|2<%diFhmnwsR=%zUXa=AZFo zd6c|8sCdlZrA~WE!Zl+svnV~Ue0kH(lomE*OEX(NEQz|$nQh)RXXqWmeP$9(DXMPm zFThwTbb5h*S_a3l&69TPa48($5OC#vP-66MqfgG7E_aPz z69=i`T>Abnj8J!u7PzV(&ua|y4!}`!dWWlquvEQQ&aK!F$yr`VdFw=ZYpj>i`+Eb4 zCv2^;Wz)T2*>7iE_a0WN{|NzWe|C6t{XZUUI=p$)*}M1!Ta4)S8jrq4nZQ7is-+C4 z(CrUFo))E67Ux~dEVAm^3P&4Wf99RfR0kqcR?Am|re_6}H5m8EhFooMJ9$Z$!+nTP+Oq zW`29Gt!14M2z{BVLq9YAlKqDA1;11Bdwtbi!m4W*^?Wqgnvco6HVsI^Lr8Z(_8t_O zAxm$C)CX`>8a4~ds*;s`goW>buRRF<5cb4ujmWGg2g_0KO2xr<$qpVX3nTl+xSBCD z&XQ{#%Ped?Q;iqqJVClDfDP!^O^)g0AQ(~Ui&my)!VsyTGp6kpvVEdNNxjwbH6c(u z=CU&6I!sG8WwgE_@)MAXg)zyTH{Rsg*|39ErHrh)*p)VJVpwB-YtRg4P2b%Yos_9N zU(oj`Yb_zOX+?GdaRUWcx^EQMCXpxzyWDjEnG|sCO5SSArP<|>ETlo}!eIMAh_CF& z{n?rprsbbMk%;2IM_ng`Hq1L=yrAK+9vj(-waETkFYgP}|sL2 
zx3(YVYVuq+Yc)wXcP&vqE$v=YK%rHeFHr+}2+*$rm$1H8t<9)bG9mM49ed71oPkDc z{oT+GJ&o8GgO7c!BwE~qGkw!C3lXoVfc5Y>h%2EHPvl$NFGXw|#R`m}ADk>qh`6W` zyBO&!HnU?)MfbW%E(?{>J%Z(9K7wKubyS!I_hV=MFv>)&%a7n105$2E0eKrb|FcA0>zHdk{VOl9x=;5DM=4S-ySo4r?cO=2@^$@nWnXr$T+oR1wK}+QUMy(ej{|`4A;uW2X{3JNWSd;M>F_jVM7k0fO%@;r>ca0y_5Rw{x^s#z`|UAfRO7ZMp3?E z8YkDS2^cQxy2;#@F_ji~hhNT*f-l?ynrD7{=5GJJI&=tbMY;5BAP&IHz?i@JPmZ~# z23!7q(=U^0{6Bc(N4{ba^>^mxU&_UpjV?>fZzigjjhj3-+UpjunpN3(z18!l=PgV;`dz-Z5xIOF}iYvHu_XTk%F?7}>) z$F6b&moiVGE%9~oifdQtjG1A`(p!Vk(2v-+scbalXY+wq8rZAwQ`qi!jc|9Wx0yv5 zw8Bp9$d<9(F1vU6QYEzkMtD~WNm7*>CQAz&xtWjfroChCYn2R=i=Um$f9G#>D|sb& z81Xzs=&HfPuuKbE>sdH#D)q5b7E5@UJFhi?ieS=}o!n$DJ+YuN`e(4p4fT~a#rirR z0g827f12l6-zyyJJN6y8sI1pcr}(;qyqgL=(AMZKT!>4StQK}`;)&QHof;|X_u$UA z#-ag+MmOgBt>3Q_!FLp^j(kjsa$XsI_4YKprf=UoWLOYBXtDrpE$_3O9jyU=n~3&_ zVY*&>L6~oA~1T(oKjFWar1*JXaFY<2g^D;RBE#c!qWTxIxqU=-X;rzPy0G7 zVnQ7!pJ6Z7NolxG#wvrU9+GP2RVT7uTjhU$WLJVv$8gH~QVJloCaKkql(doE2r|`9 zWr02;GDp8LR;jK4ZIyKi>q^OC-|2QOW#0=rD?NcP!qwfSWGchb^04DDl!iUzt-@=H zpi@3Zk|5UO=Tu1aNv@tjyNA^GELl+#SbAAWjO&(WnE7L7DDTS*&zOR>cEA@oM;4t+ zYAPt8#i4C=XRb=wcw9pSB)`UvG{9%k=mwfut4n%mk3Vkq(TG2T)g-S1(Pi&nz*jp{ zvLWfIqJXL|nCir!bx^9S?kz4m>dbykudhr_$7Y?7;Neq+vmpi-v*Mq}f0hQfH#dlr z#*`~Eu<=h~>VtU4SHRg}O@Sp4GpAP%XemT(T=`hEF?WB%T4pok}kD2JjpzXU@RgrPoc-xhG)@mnhphS#_#th)am>HUs;;Qdyd*0%t;{k{!9pu{_a)HytRxci zMJp?0clUQRNh8nQb3PUTkH=Z~h?y=1zgy*hj)H_-bS z$N$4}@C75!vogYMOYW<7ie<^gEYseltsXFRESKE*x?0#f{oIYL!u{sW$#PR#nKhn_ zdqocE`xr^nsSU9sm9JXxMq+ty841Di>TwFKvkg`-d384CIip70m_Wqy6Fs12y84H$ zSIniAZQ>t24*7P)@OrxXeipO0M@mARv};h}*K6_@kiYWwq>5*r3USooUQAf1q6?C2~q_NnUVLZ8gKp z7de5P;Jplf`!&a+=8L^O^+=t?3^!H?=3BIy|KJovVA6WbiVe~j;@+-KUb)MEx?fet zM4OwOQ^@W2Q?;EeC++sO>O|!h%e@jWDlT^ltY#-@_}4;0_(hT_No>;|eDAJ7+*0&1 zAenvnqbC+xY9Z8BIQ9KL{j>%Rf5z4VSmk8}-WoJ_84qT0-W%_e*zcRVafy*9PPtd7 zX4Zt+ZWr1jIdZdO@lx^Ra-QXc^*~3o?OV6E?q*Hwo}aN2lY@!e$$4^pwm9zAb8Brn zZO?OSpKi3K0%l79WGxuJQhULY|8{^^jkf4DA|yhKvXadpruk-vaMO{ysko#aJTn98 zyNpjFs>GtEG8Vy!|4POCiGIDR(6Wd$ZlB8_qow6uRo$t7v0=D*TD|zlRvi8CIj&Uw zTDo`9!sq5Sc@0W=ai0SiRDoqDNAAJg(qo4d!Mf40qhRymtZ8R60|%Yhq`efAZA@$3g1BA{7~VjuZ!d-R4sx2c z-MCJilqJ=#gjqDhKwvyD6`O6CrJtN;rCGGY)A?{B%Y7UTN%9y@9Ck%wRh14l3#mM4 zPOj!ZgGu+ixx8T1)v>q7D&v5k(hEviuHVm6qA+kNWbI3JDBVV%ji2-qVnpQcXp%o@ zm7*_6>cx619Kz!x$f$v!9bpQG>@d8zK5R{utW%beR>9bHUbKx^xI50XB#tzpX}xUL zByGk~v?SnM-K-NGGO6KpccglHKmC1^xIfZ)y2{0)kK1T>3KjC=Hr+-#_#XM5b+K5! 
z>768y17ou+RobZVrk0Qz&(M8U-;Bgk4GQwes4cY$FZkiy_)A-Xc&5!z23m$PpmVy= zO1%e2xu~u$!`b;_Dr~dV7VgS%`W~ehTPv^c4735I!-dx5ykf^IlWEXYA26z%uUR-| z(^`za)hk;@$nu0A9y%~Hc%60k9iCQ%+Gi1Sy;YIDTnr(QWc0-fqP$f9xH|EF%-jtPzwOgw8 z=dq4+DUt2SrQB7N7d~{9@X41f+fE(K$rLD;C_+pcO4eK^rhw0EYm%zXW{`H(y<2KM z7JjhRR+5h;N>119Ykk<#T>Kmk8}W6>wjqkf2_%x9FB>)cwI%QWyPb3AIQ!#T_A zXm0iO&X&o{n#A!i4SJE3JKE2u7I{FC|5w+O`H65rI1&qjK-Z>v$h9 z>qX-Iik3Xh%+$giz7(e^iiLR0<$t7G&KWG*r}Nj4X*D`weOOQr%#>(pVkfxVN6u%U zOu>{;+THRr#e>$Z{kV}WsL`1!v7pY5BrZKFe#c)y5JGIH$I43Q7kVCL`P0uvOK5&- zT1joECpA;N$b8N<5(^f>AsKRoXiKE}7^Jux3+f~nGGdw{ zf-zo;TIDtkiyG-h#ZO`yQ_LU)$yN*Fm;E+fbvMq6wJCmkHdtABEsB6kFL_<-1#?{% z@1~rez0wD1{L=JF+n4$dbG?@Q|CwJ#@B`J3Q|BVKpDYBBMr`fre}SlT5m#tZTh~Et{b!}$AUXh8Hh_FA030@O z&ZpnO<_(iA8xZ{p?ZB^|>z|<}fJ30de}`%Srr$s`R{+?%!EpS6SWXsh+Q6>II6MCZ zUIB;^oWNgp82|tQV0VKG03LjUb@&tBe%bxQ*g6182-x;3M*cI}2FMC(MD{x3J$MH| zbQ|z}1^|2;82JWZzAK^YEX?nauf_c*-*x!@xdwQle*$~}YGdpD*)IYRWMJm6Kx>@` zS*ObWAbS8iUeH)akfXN13;mty0nW~H|ANZ^?nvA8SDIkmeCvSz*zW`kV8?6(z#XZ~ zmH`0t@AM0BHYnuBbpm1gvDV)pI6y)Df>hVd_Y_z}dwxfvS2TpKn*e-{Sditvz@dPZ zYeD}r-TwbQ-PUP<@&?OU_xRV%>8a0mu|M;OgcLDOa}|EKhHlXo4R*-T>$?w1g6{jc zUDZ+MujYkx@9zbCdN;3jsqfcGZxDw%aHk~dQYkKmLj{2=&q5t%UsR$)X`f-rIeRqv z)Lmp`X0-1eMUW=e+HWgB@0shUR+-k2d*@CbBBadpzj!iL9@5n76LzvK^hPqVaSbod1StcP79)duUXef|q2~%H^G`fZ41(=jtmXhICClef1*5T172urqXr& zqb|yp;@$e{ljKqJru>OHb!l{teNEH5gX=~IBw6z$nL_ZljhudLV1J%%u2Ic`hv}IV z=wzoUTR$Z*!+XxnQ@Z*S)lMw+8m5;M8*?n(-!91U@M~x$dt!)q;5p#b3-ittHG!a$ zx6uvh)^Dmlbh?M&?T-&8=?Se|%n+{xuzKa|c!c!U^YptWd6))7lN64Lr7&M%#j1L` zkDqWdC1Sdm4}bv4IW5Hf_`7Y7MovR`hoEh+!sw8DtG=))PHHXv&l`+zX5Gt?)j40E z^BC^y>z2b{S`pY}Ubu+W(6n4WjYN5h6!aA*d5kgk4)!|@Mef_Nf%_G)BHx^rcjLEL;k zN?kM-)sL}x>SM^_Wcew!9&)BPAeSz7Y5H_7cK2Y!j6emSf?vr9>(} z+bHYUSiJEx*P05@Nj|mEo03U)X@w~M$Q_}+KH&vz^QKQA*wr-AuR7&UeT8&-R4~!z zyhB^`l_A(${tmL!fE53pJ*pbo#^G`y(}WJt?ekwVcMO`W)J)+uk0`d=T71Vplm5$o zJzL;Q>0U_je>6I(vSRITF`8ted}yjZ0(t)R$*&m_7g{5ZtuCtfZ|C=5H!%&`F3I?X z8t+0w+IKnA7+tr|XJu{?HHZ*^jJL{OTPvFI>{^nL4ur@EO>1|-4wI&|y36Mb?C&g{ zndmPBJa#OlT$j_}=|V36VV^NbyVLRcait5TKa3;}_lvgm*Z4eR{2`=rzt?iN_}W5MSY7z!bfZrIO#Z)BLG=#Ql# z{PU2;qh{7NAM-}0$!h1luSXcbJm8X)rpsvX!IGrcf-VP3rZaPWA*15HL0?}HJ^4R_ zzHoDfj-@j04Pz-mBT{LP5w|G&`5`i#jgZDK-|V@MjeUYOF42^eX7Goq)I7>o3DeTt zT<;i0yO3*HERU+LbNP%ry=?l5T}cK6?CMLwmp4k5OkCvuY<0g8X3{%mMYvwb2ry|4 zoRj_>NH(m=8ge=~%NxM0{`f-;Cr}}@or3wGoG=7G9n>@Z*}drsfyb33MKxU#hXzQy z@pate(%tESE)U6UxFa@((TU6RbkFL|m8TE z7Ooa72NJBdeNE?Tex?>S`oa#xt}^zgwolkfshgD&=n^N)>T2vkY3}qF-Dt98DmP;y z^rw=`L*(F#Ca@}FvXsr|vp9_k5-4>-b4op>jnpB+%&D6mp8&ZKT$nAR@~XmIEspU5 zKWrqH^&GV@crq8k&H0n#7k-yG4o_N$1paH{D38_Ke8jabX$_PkhAb#LCl7sO&`WkHb=D)ksKs+HRz4;&z!4NoESoiz> z{Ckjc_l8IPPay>$dNG7&u^G?!vG)pcV_l?5CMcy{#E=? 
z7l>7D1PIrok9)WM&!hg;g$IoCKUV#px%&V9vRZdvDq=-_-{>DFQ1;gsTqr}@ySw(&^u1Gh3vp;(B4AL zD7a%;sm%)$+s%qjFKNT8|P$+c`lsLE*KdQb?Pi&wrT~8&e2AlMAOPY ze9}c-)8!uO#^pKLyZ4uW9UOiP{GmGH7?;t5dr!M-)1Dr7K&}bz)=rHzO7)dIrw~P1 zfZMB%_!b8kdEXjsUaBVFJP6{I-L)}MZEVU%bCX{#Y=zg{<*s{1aHCjjLtE|m=7qux zsJgcm{U>&MbC&w5+|A`z(v#L*mGI2_bo&PSld2TL)p4rDJaN=iX1-LVSN(ZI?=q%_UT zB{S`>u~})$8m6h9Hs_i^_b8c0ZZTCYt_5rQ8T*J1CJ(VdC1pC2gdE9K#hBg9>b#}p z9aLGWbgK|ZE>Dc3?s?w3t=pu8)uDzI34s0BmV@@Z$Fk^oTwoX5PQD)h^+-SR@lvaI zsb{>nyC=gria2=EeLBJl{f1liK&6cuQe5eXlBr8f>&_`vyT#OpVcir4Z)r_$dk*_h zH_S*JX2mQ$ZDm%K-=sJyJT2FZ&^b*msxHxwj`TPYKH5yvX|p+A8!M?Cu=4mERh%;~ z(Jl^ACcxj2O`tva2upBL-F}bp~9TlUs`29lK;&0K<1woLtCQP*UwY z&OEvhe3cjX&sKfF_d<3-EDDO$skTa&gIquO+aAyx zjyH_Jg24u7Cr(zm%odn-YDWhco3M6X3Vx|A^Xe0rYa>vcx$u1(t9 z0_!O&^zCKo6e(qe!eFfP(nXBT>2P`51c+GrqiqpySe>5OWAGqUUb}H+2d>4j4(jaU z8~e;LVyTKeub8Imx*RD0XN!KVT5T+?Q#yzXq;H;VyR?steq)|zJz;&SkvLyEFpBp1 zxccDuFrHH*F=#5gcbPH0aB&{2;SVW>TDT0N@l~Fb#2hGZPRu#?{jq-)97BZ2V+tCJ z+|K$Tkbc+pdYtHS2;BG6tti!W%0bmH$*sY$Q52rCSuXZ8EVeH#o87BaCx1zzcIL8{ zx3wQ*G)4R&OfeEtqt*=CrHwnTShm9m9>6iShC>~+c) zW@3*I2K##MOXy7VSIX_=1@D$~AFGB_Lg4Ci_-|^q6PNz6#r>lmB+=a7hO@ixeYxGBX!EApi7lR;RDhk9%cF*9T4v6SK- z&=1fnCRQXrzW~2T)lSnJ@uyUW?w%V}NiZtPfKVkTS`9!4M?EQETkm#_ZcQ}E)tPQv zU;^smU)*^GH^%KUJ5_QP^<>#{kCdr1{k-ev(A0Jf_l(N-%KZUJ1E@Y|TV3MvP-^jw zq)AUQXS*5IgWm|h%e;Dx5OVX^g?Ga`&)%qQD6wyq-x>jD%4!?EXDz$X4d*WM5hqn$ zD66r|Bb5S}IS*5!h9FV3UB$(awCgP4qw*$%C818hp2HYKoR|ropL(s)`BMAj_dtTs z7<}tr+)N;h1)>KZkrg*S7uK@B&&5~MxnYvavE)ACx{}AH=+BX^y)`s#L?q_0%ctwv?!D)s|7&=ubLbk8h+U-h@Qn(UW3KQY_r*V zoB@x>yxJN>7JwZp_F6kUz^w_^A)7y4t52Ufx-u$RS8mrU`hJ_kIT$&`d(Yw}Vu6xw z#TjCd=1$n+g`bu3JSeh5llDeuZ6P-XmdNJlo5dw@1z^jhNhebc0lNxnVQrgu$RmRW z(ABs^QC^q(*@ULz)2*5xSDwN88%q>}#vI%n25}}-qI^~hEa{ir`m(lXW zl8<%m`U5nrDH0rOt(Paj=u zXcaloUWHrX#!ocdxl#a$yI`b4kbn7(dImN_^S)cNHSlU=DuLKi%k?(EHG^VGn#hDDkeD~`< zo@MHL;ZDPO6|n8@sIjM%`#80$kDPGCxZtdcxKY)Vq; z;xoR)v9&@ge8n`L)T_Y9U#~u@X(aTbz)p`#f`$}g*qHb!QW-(2qNyolK2XSIOV;e)y4cNqOf|__hINWGTw}QiAO|;&%RE zJ?U7yEp+bgAX2)yXir-bfKcOaIt(uqg9A}y4O&XRK^WcFPKdqo!ha6#M_$HI>M+Cn z>VQ(qT+8^PP2PmdZ=Cc$#J}f-iE7sxk2A==qc)yDpdXvjp9?~pyk`~N@F!ioU!OJ? 
zPjb{iUwNVX!%JjVWkE&B^}|@P?mdP%n*L|u)Pb9pnZlF?u)g0IzY4+x$0D_0P(!L} ztxiQ98lBT0%$=ou!PH+7nu_XjuwlJDM`VR(8+y{hi$uuKE46K>gk-z(@0+2Q$4(5E zedM5R+YB&LlEyEi?JKlVwK*!99H9ZNkF+yN>+j7{d_;yR;oiGlGDQbpV0vKn9y3OP zHq>?;x1O?+qo7XE)C}m)-UG`bjh%sIomQ?~h?oUbqLV$aA-E-uPb>P7FuS z!SHk@A-&YN4soeLTky8V-vv*4%xk@+oA(`=D zkHOe{%#DTYRJ7e7R4J~1eDh3Nu5Z_k{L!7ghGvL(kGo>MVD(&pj?yV~Z+wCj2u#pc z%YJ1HF^xrkes4pH=^^yFt&hh>LVFQrn%-tPP(G}EifLW5a8LDe<`L4<%!8*}BewS{ z&UQy|+QI6|U7Vl0^Ix%1tnh8U-kH)JXYwO;{FJKTw&4n#b3BMEGahU5gde^j6~7QJ z$hlBXmFs>ZJCAOcFl-){bdU7N+m+7AR1ZuZ{j|84EjK`STe;IcWzu(zRl(Z;QR zl~1IA0MjeRvE#y7oR75EQ*^%tVx`jv5%f~uLBcnsLjA6M*)gM~jj^DCcAvnoGvn@a zEw>;F-Y3_t;NU^xt?p-0<@Tkiq8H0j>TrW{b0f@jJjvZDrriGJ_nX zM%TJRF-Nv)&c#iWuh_V9%Eds1aO%uXwL~hj&_UaG5ns5(jHJ|G^{PCvo)+nF!-ekO zI(I5~BhzxV{`6fTIzNKtX5{`aipy}GJIuIY*Z^{ z^;49zuI#rvi7Jf%;-FYU(Xf~C&^b+S7;NsUX66815G{=|V#0_Fu zq}WMV-O{-K3WB-9{Rs@wRp4lj2TQA{(vo5Gy7P-9o#$hYIKt*!fAgR+vZpT zuQ;XJ-j(T^-0ZOw`J43yuunxna1>>z8>u=oU$$nhb4LD-=BPP^^1Q@)zM;s$(xWsX zgxra4Alx87XNCC&Y-U~4re@BTI|%va2@Q|%yl1u{BE8{Fh?~X`!UC%_kbZZcLj)pEJbtj;YL`WRs{Lk!-mTlw4xgoNY^eY9=%zAFeHGNXKJk zQ1>kdw#5H{u38S%`AAj;9=m=-cIb+zQBFYlwcgZ1??x5FgtBQEtBmZYt& z-q$#JWO&f{GAtws>TRn8Ps+MVWszs@>_sXYqm4@EmPo5|l66Oiv`XB&ucFw)1$v~*frZ8`isKRBQ(MDAE?t&Q@jwdyy5sO$=_xSR9*Y)J=>D7a4-dIis z)gnZ&lUzJAWeW4wb~COEe8W02AvLN4FJ!e{qrTu=pf{__q7WL z)8h4^Dl&rsb{}AUXthd3gbU`d2sC z7T`R*s%>r@2k0N$0R7JM|D5iBX7@ig!~Z>xn&8o5`%?92|GSWe{$pj1hQu26{nbHB z4H>TKcPuANt+UE=QBPPdMJILX1->agkUd<3C&qwHi59bRS zNbZ;OAWA-aF{a#=50q!+4llJ3>LsU!f9Z8(B@YFrR)_U3^KM>*z-5(iGs2xsiu0+2 zHYTJeJr)%(KfQF;x(AhlHs3|S)8qi@6?deV!a&4hJN5KvKT8uKfj;W@p}Po?4iq`! z(}Pp788XeM%_NC>4ZiKu5vk=l8p2yC6^o5xPc>5<^_&usmD;%~n&dT~)JBU+;vRM7 zB;}Q^@gqcLV|NfZS2vhAy}kOeCA58q&_gbzS(ki7rCT#J0(L2Pdg#UaaN4j4q+u9J zaAUprNX@9U+N#QUTbWa4h8m4zV4T=;@mWodPXfc|*>cOQUzh4D4j0E$UB*n9c*}+2 z?^9HKjT`gEn(*EPBW#`_J=eZmm1o{kKjA>AKeo&=Mc~GCxL4EG$$$DXK5t%>*r05J z3hL@=v#~ZRtsrU}Nm?jn5g0-JU8&D@+UZ2=e-2f0g*uwI!j8i%i8~DwEa0;jYc(}* z98JQ!{H*n4-KV15F-^6&V+2Ey0Ohv&TN>&#KMd0?=`F%gAw_6S!Z<6ik;T|cKn7#! z@u^^6vkKq#Q7KC=oHK<2?(i~KmtXO@igFfo=89*y)Q1fYf3%@06e%%{56oPmvYzWA z%m!U4?&%@DzilX_cL(_&lZ2w^36^;&;_^p(XqRgJCl5B+_+9NxcZAf zuP1UG2=rfF8|xk#QR8P|%rU}@Yf0D%F3*tm#n$}=N&FJQkd*V-2I0{E2%C%3l7Whv z1*Kxx)-)%wX5EvZ-We zxIPhdE|{2MgDA1v)2(@DMi%fxJP(xXGLdn| zzK+~Ov=izsVByhxagMf~kU6hejXdY}K`J6QSZ5hY*HKppQPDB|jQO2>GC&4g@xmdv z5@Qg~d))1C@G;KKW9qU|a${!1Gc)?iCH6119n3Wwgl0aR6R?qM2Mt&3?f%ud_@(Li zPDMezN0fD3uo|bK^x`O3KqxS({DxsMVv>+@=j$O;^{SKnlfrL2@x?RwhEKSi zH8q+`rBTXhaKD+rD&4ejUH6`Xm1M=N!0{7aiPJLzqJ@QF${(5o=5KGss99+JD1n1( zrNKYiJYH)Fwr>7Wk6YHsH|yK7%6&iLrc||2kj(G5s*_QeQJ8Y`vRBl*?~iqEZhPVIn9b3mV*MR}-;G zYpK_1x+XZN+RC27B0A4!K3_o(w8M%6#?&kZkWmUn$8yE#IDwB-qd}JTO(DQ z{?hEuj;xLnEnI}KSMINC$0D;%BImHWUhSkQ+$3Wd#$NFXG|D@+T+ufMCs+D*PK^Xp zqPG9Ou3hv#^=QDCt8~eBI=;lwsL7NnT_%@l$cn%{GaBA)>w>i{+ByF+Q_wqunQVn` z>jwFX)^>Fjh9VKY-Fq^jzB^#H7hKk~Y(XI%rDipHCt+E3NU{W+>P=DprIvWbsd1~t z-?q#HAj!{HMZCipS)_8BK(2|lS+jxWVLE$lNNW^ECia3x2%#2+%G@=3y3hiyMQBQ) zABMKUs>UkLs;5=mq7t~+0c(T^1ZeC`gsj?H6 z8eTvge6HSRU+u#*sxQXe_w69YhHKUN%-k$NIJVv>hk5iq9{ZKr7>a~bwVZW5DZ_*r zyHji*(_+YVc9};3k4w?oNkH~%9WZaA2CT~R3?o-oo(lzwROuzk!riD zvMi^&-ZhRBmh_tzf_(AR1I^7>n_apsQWr5Wd-~iYV;ojFgf0ydlcYWPy_?FVxYi=K zHo+jsycBOb8<)RzOzecT+S);crqrOn(?OEm`^pkivH`^fd$E;Qn9mM=1uxWMFSLfi z^AsIDA55RIpE-hfL{{uIV5}_Io7dD~DWih{eGqSXTLUJjE#PGgs!lI0H4wejP)!)V z{ZhbQs#j!Z)&0o!Y>wa6BlkOR_7IP}QGb+1BizmoUbnGHfDt%LH%{iBuj{AWu?g&n zE4n`>zwluCBzZ*Aq|jGd%zuMI-QRM@F}OWou)+T&VS zOSU8}d58I>W_+kU=`O4_1lJwqlPF4@E7b}T_lue?lz@%bQYUVwV@QmZrvWcu3L`}! 
z8fnsqPgms2MDA=1+9^7Mcs8d|@zf2X$0AC-@#N3hy+niSRb%V!ep1xpuIV78rJ^2x zwb0Jf&KMHRXOa5@RxbF>r+QRHEWS+~J3jx|QdGei{0V{TXfIK4KSviP>ewMq6x$5X zV7!mr;iBFbetrBWVufzhA^O}eoz2Qi z>t=R=%nlzNmKII`a%&nLGx#=bRPkH`v9MOR5Ou zdVr!pPeyYRVC<_7s?bcMUK%$U;mzG=TS@WOJJSHQdBH%{M*T-yYu4Roa3Kc6LWg6s z(?CF&AMAhgVt1pXtcXL&S`uJG``09zMzs|Q@A!|};*aT{j=sDo>+VdoKS)i+8o;Wp zrT%uwiJ9B@*r#Od7h(>zaGZyHF_F{!j(tlP+8 z>Uw*CCr(?sQB|5~nTc@$VjLPzMmj3N8%MO+-FJ{I_BgH@gK>{)U*l$R#w`-#z`dd@ z>RIaTSS!3!$$8xjZKY!aCM;bCy9d1=b2C-7hu0w^F#$8aF4z1UihpHGj_5O{@o5Y$VWUA0;6r_dUTEX3;+ z8`IdpCHH6B0JohGt79@cY_vQ9l|j=A3UTmGTnbRt&C@rEwh2(s!{dc?KmLU9ukIec z`Et9dP(wBnb5#st8(OspsgQgPrniADgW3aL0)jWET$koyVa7olI9rP`8oKHJYQf{| zrdT`R$VMesFt@A(0}~AA-fr?7F59>Hyjoz@*Iv{3satMXVG_!Ji3*wP&R9jys%}=6 zWD9XOA-b{%cf4kj9AAm|pRFhyR3f|YGl z(DQ2!D@lYUuU?5K2&amYysAnSxn&Q>dMU0{c`ThSl*=MnqGzlQsCV}{S0Q6qSiN(0q?%eG)F!9!9)DuPI_p zCU!0a>&aCW`wSPCdR%+Wkh;-6P@f$fdFoW2+_6Og<4~E~94nxh5#NWb+8FFt=!7Cg zmL0*toDHZ~dFGzP&7+Y%kedgU#h=LS0!RkBT}@mmB9-hD?D6IahkfKAqmNqn{1ex4*rt zuDnBuxO9M_)&R;62%x+}s+;;m`0(+;pS^5sZ^it|A{Wiq!*r8~UTZFIG{Ymny4alu z>I4w(W0i!qj!*R!No}G|@9L`W$8blWA(KM8RbyeAyaC2N|GSNdR>NmcGuwL1NfhoVZScedNCeLlF@t<=e36)$^FP6NEQ4?)!yNUd%Mky2LkRz?EmLt zP!-9k^l^CkWWu(0`sdkGgTsu)Xn)9q11cX=V#t%4x`0!XS$sADB>MR?Y~lwBx2v^3 zG{K#|+}Ch$@N6AdWL7fty?Q9(l9Xus9222le2R<_8NAfxzj*&qU;j4iYwPh0Cc5$m zf~L!lB+-m8WOw$$T+`H+3pp2re4yLxHGsLp19m7*aNy&tnJ+`k`NGeV-KdGQp+6^SmzVzcDuOw zV_DpN`jRCK1)k_EPgmWYf|T^d>E>TT6fHO|Ke&l}3x}*-{Z~~$?$beHMM+HfN`jeV zZAQcMqOdoWau);NZ(MrCUV9aMG7X2jqe;GDZ(f!l&U7@$DlvVk+%{+E=k0lsxm(+{ zB=UG#t9U-g97>wad|7hd)Ij+}(DBvhLUa5TOi^5y7kBiWQm)cq=Wv+a+ZM2iXi#Ts z<|??Lz0F323C`*rW<|(1(#Cw-YcT6+_8!Ai8s*J&dCydW(vd0&=E02N*h7j#gFleH zZIcH+jcwz^^1C8h@CY0O@oM2?XadEbpL%VGwyWLsh(pbd1jP58hKH*Xo%pI3{Ohma zZY<>Mc+fCSm2PKi6FaRMBCf{47gn!k{{*jn6ROvERZe`<|Lk7)FhN}a2X}LL07Boe z|9m+&GKvv468=g#QivAzaLxN9Zy?vjse4{RNkvy+duR&znt$CGh-Z4uxRu`HJC1_5 zv>-qhQY8IzX)Mec?1svHpySn2+`xay4 zwg()PMOs4HT~|YvfA2# zj&`rMaBDx7xm}JFg!iU!gWHmtd}K3X)*U*MZ%cVWGgGK_1A6-f7tBtNV%_*!;eqfs zGK$4H-ci1sI3Z9!<@*k=ry&+3*xu3??cu4#iBk%o0Rx5 zv_Qg+-Dl{=S+RQTVt!Zs#O?88fqK_7fz2_{2LxUrxoBc3dL zd%zWggFQwt|J!@}2RjpuY|F;5!k-6kKx)rY*NZ|c?f^^m)%%!`g*N)&HgLsA_x0LL z$-JXXiZ`Kcd*aXLfgpKgyZ_>a1GP;}Kf>coB#ysX!w{f2zdS%e^cu%t`P(wkgykcb z%$fB?>;dnf4zKir+MDg4^KScN0)Gq~%P3iu*d1UKd9LZ<8fk(i89wHh!@gC1{oV4f zrE?6}Ap=P^0tUU#qTR~2aAk_3yT*3j5o6x;#GRCm`Ww(_GRP+(z^?H&!?Ra`YLT^Y z*0>dd!K>=ZwaA6e0~UI}v6tERITfbs8@~t*n-Q9V&ad4|jK&3o`q(HZz<2Pkt|tma z{kZN|AtEw4ldwLXXnh~UZS-I;64u5#gOk%r$W;>Gs))i4&bWEHBkS;evvI@G?eH?pqKgx8WJe^XnZ524=?b6|8Wo~3scP~4*kc+>`eal0tU#lsDJ}(l~ zS1K9GfA8m=fDBtQH8ig26`>5zA6#F*uwm5C&Za&IjJz?cS-#>7gh^q%X3Qs6+&dyg z=R-3YC-S;>igi^s3%$)l+^D=AgYCyInOQtNNpqFc^#wh`x&e0H`wPcq%C@SCcU^IGcpA&BgY8cCfZ0WqW(@|oL1<$hdBKcyeII`w@W z9MT=jTr@uPlu#V4&tXGv%o8#%27I^Y`;PNA@yt(G=Wy;V_fS<}%~cy;$Q8X1!`$(z zMYcLGY!#|wFXhf@ zyqi8$tNSiW1eAxtXgR_!qKHfH#@VzI{n%)vqpUxc4Y`xP2jWgL9$27G( z920Z0PXdo=N9!wzkr?K=DU|2AH(@IbKx%*EL}n{)(BAfvV;=->u@gLK+Gkhad!X5q zmZQQn>64f>B#ZE4nf6U(Lr*5af74Yxlt~bGeKg?RGQO73Iq~MIMiMAiCMoQ6W~*J7 zq^MBjb425bzhMhD7j@4s2f$1RE^t|BJGYu-0i2?cj8o2t_h8%pl+G4K^{!sBy#&?c$u^VpF*540f#_?#htEKrw z0c?umg z`N{6Qt(>5vfst{m<~c!ldMvXAjV()VeGg6`LS62F`ekDHhcd%ON}JK>kcf(6B4h1> z#sbsV2DX3)V1-x{`8Z~4uS6~45bZ5vsZYYM_xwp-JX2;Fi&?#T%afKc$`c-*_LqD_ zK)yg$6jST87GC3pKyh{xa>Y%SzpLT@=b1pi6?6vmUy_W(zY*{OzJ~vC{t+ON_*%s( zO@sj(Q}}n^yxD=A;{S2h(?3f0+Iw?Aq1d?X|8>w1APxCi`u<$`B_3(#eQreEjUo?;j0ZeSo`TOWir9 zuCF62JR+EK9wz?FjBi$&?PBbkR~b|Fjan1_DS|$Ck@_4_GvRKRuJgY*sEVk_Az?|x$FCMVy`^IBCjFuv23QX@qpBg%8brNKJJ|>q3~UkloUun zb^*sIg1jxiqLXegE6!diDyvab7lp+05BydX{sW$&`P@4AT<_ZX$(awq?6W_3=XwhE 
zKY^CTo;p@S;aNpB3Z8@A#k{}2B=e@zi*kPV-`q12_gvnKU?uDe{lK6kDhPdVM-6;9 z!d#EX^o1R$=d|6$c(T(pby?7rp!Xsy!S2(#*r#ex7eW zy-)Gc+joNeVW~%Fe?D)^GBQju4^nizfrTP z0R#Oio7rQTMRo3Rp&qP_sq>7hQdHv{^C{5rM7yL>VuGWv>pJGPQvy$#^}A;T>mJ<9 ziYfqqE2q6-Bt4gqF}tV3OBuDxx zYw6a@WQM>$fr|#YKTglvwNFvIf%IcjI1UK~r0(v>XXA7>;sF=32mT8o5;$v_srJ2_ zam?>%5%(bMf|uFMEr7^`;qM}&Xv${!-3qaK-&o=Tu7dXo{VY?Q@O1g|^&zE?IiYD7 zqXTlq$RfpO+ARSGcLRw9w*)TJLDU=lrAEikSdDmPCYcPD-%HJW;wf}lrua|{Oy;IO zC;it$C?khfA^0c16Pa~F_ss~yx`@|ZUL{mFgr+D%0#GxkvhmZZxmQBbfAJ($pT zEAj#-G<{+zL)K+h`&oCr#BAsxl*tnc>-oZmH`*!l>Eb7Mg`kQ%y-r~|p_39*8dRDO z=dImmPnu5ofYh1WJK7RdY1GN75K@yfRnXTcqJ4lO? zJ6c@96$~L;@M}SX0qh90&G;}*w)pa`5t^34&4xK1zG z;!dZ~SIo-fV7qShV4v=E>!fX|2t&Vdg$ZNHHPxg_nsUak5-|OvFp1K>U zr!%ufwrqO!r$^F#4m~|uj76;s0()JJN;;+Jamw%DT z+>Q&#R9h4R{xR4HIG@o%!vNKiU<6Cy;yCc804gqrsH=YVY0MMFUf^^1-Tx$)UX#t9 z-#??nn%!k|rsb~alm=J{80_UcVgVQTgGtCHSt*GUM^wi8lm<`*;6~J?L7rvEJJ!Gn zYQE_PHtn*&IeK3VU!CZ&Oc@v>hfPjAv?#-cS(cq>4Sa;LU1YE$cULXlcuRu&*|D6G z`K4E;3b3CnUA=kt%ku{;il`oFYQ+oF0PG*m8W1j8HE+b#X;V7zhas{+tzVYLMN3DSwp2`sNyW!YYjuxv7 z1O#MxA9kwOGFd-(xeg99R6}M41G5BFMuU~gD5QQ_7Ebay6Um0^Y-1M#w+A|x(s&a1 znK7lolG+PgIN3e-35q~{e&K%helc~RoZ`lJvcx%!o{7JUn+AfX_+LtqFTFSZyNRZN z(e&SPdTqR~yd%(*L4A5QpO6Q&Ya;*Pe66{}*q9XLK zL?6(}<-eO!`&*Uvr%AB*uNzdLna+PZ()7y|{2L0-CgSU?Mt`Ll0g@3g;QT+O`~IWi z11{Si$lBUf-7kX#S_lDW4jc)nQ2)uT{(187UvZ#|$geyt;N$;FS^hUtk$>B}{~6o; zpBwILC!9ZxMgITlhMSTJupxXnFLtm=!L6pQun?<%de+v}2ZG2i&G*091bY!6exR|}q?CGVEj0*y`lk8rhMlgSzHqYx?*1%ngcCQYD*V#AyNL4|zD+K` z`Nowi4ef3|PN0jH)l)bE__ce4+74aUv`%Muz}zf8VMhh0C|kpCv>(H z01`4sIuvUMuPpf(t|8!u;%V3?^SkhRB46II|6EE{0r}Zr?bs8xw#n21KyFD-lt%~A zmb0@L@{+r5hc;FD@ls1v$S@@hWEA|Tq1%+Ya(0y+>mk{%h6u$N%g;+)rKY7LzME|> zYg)u`ih3?=-q(e~v`-t7?3<6KHZAJX?IXknEh~zOnz9@Q?o4Lb1~f#SlV4}7taqoN z#~HMG(a*SZ)SB(;0Zn?5W8x+jtTkt;7elj8eS3%>@%|+T;_I5+2#A4h;dVDauCb!t zDOc~0?K@7e;_HGqSwxI+68YDmqG27aQx(Gv#9oJ;fdy&10^vZgZgUX1r>rY%5+ z@w9=RJ4x9uef7I=ljM-mZd4!U?eb-9_UqSBECqXn#A4!obIoUN8n7Eliv@wQSDo7D z>oaO54)jS-CL7}#E)F4l3o~Y%>Dz@Efg<15aJ4Jlo$tnu;J8*dX1y!$2eMv=Ef(2Z zBJpTRqGO|~oX2|mi!jf_X*iybOC1}VKd7@WM8s%Z9p+sWny|a4X!yDK1;*Ta(0MfH zPIX#*=I)>mZf;0v3CTUVN_*2OVEN+lbi^@B@9v1!Y)*BFWGm~tVRIGk_!G*+>N>7$ z<)JSvd1!{p9cu{+$P$ZjX$|G7zUcC@iqi97f{Z8!xtlj~@2mvDa_Z-{j(|@?TzF0H ziGVH;oe93$iYyU($fFK1^~dkdN_a*rrF!vSSE@o?0{XglaYz!EvV?UtHJ6ivhPFz4L z(m9@2uE-h_f91;sgy2DGsIYg3RtnB;bq=haFNh1NywdmfgxJD5B#mkZBIun6DBwQo z-e>=CnvhFNF5u|*lUF3M;bkgm@Q4u=0&}<93s%z9;=t;!@IJdZYHFa`(3-xY06S|= zYrsJ+@JL(L7;A_S;4W)&pMpOW*t(+%(q4FDsaA)3w+(Bx<+K8|K2>)-g_PHUQ3LCS z;Os6m=%!}8iK`l0;sk-6Dm&xue2*Osyv80k&Rf-MSiHiP5XD-D7_+;B=fA95g?%!5 zL(3V8%I(puF-9miOl>EZHR^5G($#E!2sTbnE@2l`NxZHf_XPU@abT*CZBH_S_ma!` z`~Z(ig*53<2z059nJJ-`@;1W?A68}V&DVR{$6a_E@ct|<(GZmLC~I@Z8H=u%;VwK! 
zF|xer>E8gf}maWQ3}R^pK3 zj(=*L-V591Su!tw-n@(rJ@T@5p@;VtLu?K-qR1M!Ht=pL@;sTq(zQUm#*hp6?7MM$ zzc(GbVVdC`NA?3*iokL--Yvogw3OGLj#Z>un1413mX4ivx0Iq}LP}W9qlw+3f*m*c zC06Xkil-QSL!X}^dpvf}xwa7KR9A;;#s{zz(MUywEybhdthNf*!gA%4#fyMAU)iS% zRF0(2=c1CK8Nh!VqA*SQWGXfn#}i{j@aJ7Jd^UgerTfHAOKYXuG?ceb>BUWtZ7|)H zW|w=cq|m;!L9m!6r{2?-)oFkYKj+#2uvef4m$f;bEvwPHb}q%r*B{oF3pbgO?re&F zW3@HA>FzbViH{Fzp95mTBYuRT!DO@m$?SCSx%P(18C-3ZcM_HAlo+o4h};rDX>A|} zD8PruQWn=n1`;%>!Ov%2fqkPCKfmIlA@gBMAt-i-&&vKFW1gSRsfEaF9x%H8hXNIk z^$^4HJe~`%W+IY!QrAC=C`i}t;73YC-7@&r2a~{r20+~hD|NUbNE7>uQ6piG=F+|*2XTa z1ssyFzl-!{wPu^;oIRF)R6MJ9`hLmBmN`;|L2JMkQ(w%BpSI5QD{m$XN<2Q{#`@fs zjmNV;SRG8MQGbK?pZAQc>gaBIsO=giqnB$i#N3`kAFoJmRm?aTf-fiPn)J0F?d{&B zZG_a$@4lX!esI0p2aj~muV^X=)@LO~s^}!5`kZNGzXC(wo6>R9uP~7T_;+kR_vU#; zy-%*~J-tN@i7z=VT>>obmL!aifmnL0+9l1tQR&t>;8;fpEDvPbP&k=hanq{qb-g-I zO5Bn5kyY>PlKMq5*b4ETUJ62Y5W@*Q!n2Lr0Z2mdPiirN&ua4B2^bl`SrArV5xxiw#>S;Y9YhsVL6l5gyxEx$UU6}UyxX=tk z$l`Sh$qk3%;unamqpL4mZX&MimQ5T%Lh!!T#&zmB#_7eAn#1*%+M6Gp znBLaSZQrBq4myZGQg?Au7>gaEV&HdywL>n1A#{Zz&Fl22DW0zaSug@9jA}`QmAV{A z`(~fl;)4@@IPd8mtB5-d%Ntb^SVRjjXPyxO5a;&kmbXq>QQTAbPxW>@8@7bK#u`0o z`t>OFm>O*}78~gghQXlrx|%aeGd-O?yqW3^8RQ!$B)3qqZgf3x$@7qMafMM+^;$_0 zykjdPi4e5tfvlJGed0}JRPoF3MqL6Q^Hc()IJW!WaK?gBJ@-FE{Y#pz6*P(-u zae9xk!nBUWFnc>mTsX1j;-Y|#QPSzc($6@x(efl}f16~6m#*xd^T2ty@Uc4FGgRr9 z$nfg5L8eDH{e6E$y-b5v704HA!q}r~vUqErg3M5h3IV9krbM8jQd@L$(vEM$?nkST zE45=0kQ2p4iImfZS;D{2aLP!K!PM{>J7emt4?**I7;fpLakCC@pT|LgyQvt4Z4qoEXEsTtIn8!Pz_n6+>(kWQ_%q+8 z(NL>g-3LUkbm`Q@2w4!H5s*2@d8OlPXRHu)^@zQCp&C8vaGI?Z&V!nW-V(J&N<sw8J(0!dXHS2)lNh0^2_rrrZ!RddHEHyMX$WZLldRz^V^ zS^mY zE9>3uf}JliaK8pBWcB*kHiD-SQ*>;Mkv*pkTNNH$}ttbm7|z}rWlexLnt;) z+<|yyK>=ar#Z=aup3U|J5An_p@Z6@iN;O@AgZsPPsCY3V76V!b0wRHNmf3pOti|r{ z@{$G`cK%nZRzmRHb@R^&KcFx3c!TTCg)^yRI(xw^y)JIBoiXkvuhdR#{a0T>0$E2| zC+g>U97D(j+V+g)pJ$hM;?YMgye;7USdN((DCh-7Fw$caKh6s!Mp=*_Lyhcj_6_EP zPBQ=jTEtei#>z%=Ne=KZ%zjY7hIk%93raeww}FKWMPByqv}>}!jvS@h_Of)P#%aRU zVYz!d7e6w%D}LGn!*m4!yc~zI7&Mun9X>ZlIlcILKoy)x0Rj6ohS<9ZhZx1ZGi?Et zcq#UJVo5Ua73$^;n)2KgiIh9x+%+Kq1M^4Q4iis)rf^VsN3h2a9Y+2gTp`XbP++d`RT~-)Voe* zN&6_W9EgQ$t1*fegU>{`DdBS7=uZyPZE1w;e<`=9FRFa7*?uiSidoqw_!0x&`xy$;1VOr1Ym8v+kp>QjeOZbF zJ&oY8&=o<{#{z0u#pX|*6fVs3!;AH2t|QBJ&dYan^o)#cv0)db$O-9}o$bEcZlX*} z<>%}Rh-v`Po-pp2(u+N#NGLK35U#tn?*Q<6+lnRyAD<))#cz(JtcC|$T0wmIB= zw5BDSH@p&nRk!)k)^8*|%$)$spMIF}Xu;@Q$aHP8TUH)a8>M`_TxKwY%JFyksVC3KJsgaQo;z<6LNH1h$jyMRzX-7N z-8W1Ag%>tz2%y2hR^!Njl~G4;3s?m5b{0l{s;xML2A`c9+%G1npcZ(aST)AUxNhAg zt%~mQx^8@)6^?pozzmzz9NV1UVJncZSHgU{wo-h*+B@VWW-7=HVucs3m4sC4-m}66 zUt~YUbg~=@d+UaMkU?c=tJHzhMmgiB{mwDl_qbsBMN+N)F)V)uzUs@BB1DuH?P>T^RA+?c(uR9J)~+Jj zbZ%HEFV%kl<>@=uSp?Ij-n@|WbcJ4?T$FFF=JbG>Td9MY(Z11sC1qf}sH~7a*a5mf z+3)gYZ!r{7W0NUh(lzDRxHA${mF*pFm^xq~&hB(R+pGvUB(y4hAvG|M3!Lj<7Jvah~O8l1&MSGjTaENk6cXMXHKenOJ;nm}8gI_$!#T z{oWUo&)-q=tr0f#34wF>EX;XUvjT6$vU+?`R1T(t>hw}FpYC_}9?Xb+Y{l-IbuMMA zt~4GlcFJk=c=3ABaK;7v?v%B>c@tqU=u^1K3=2Dm<82J8xwyjvGILepx{p zlOOB3y}}bTms1|A7a_vBcW|CWGZkh}FVG@Ff%cmW}_ zq$^D)WHjg0R7qH!Xjb5LiBHk2^zDpoBYaUSId9ai1H$%WOA0wC=ihH4BrX|;zq(<~ zJ}I2j=dPeuA+MI=Qp0R0-J`U7JWgdk zgNv8>|Kwa#cv*{*<%ZDJb-#hdUJ`--z&o$Oi*Olopby!CV3DvjW1Gmpu(MjI4F9Yg z1L(q3EvYZ7+S=BOBVM*BFqOrUtnQ1j&+X~y8fF43+?a)|K>@nDyu;-NXWb2zQb-kA9LHJwm%cJqI+_vZ0XzU|+5rKlwJ zsi-VdDJr3oEz2ZD8!DBujZ(=nCS;45sl-&WMPwbNsKgL6gTaItOGuVs#uz&pV{Ef; z&!zjmpXd9zzxVU|J+IgA&)@S$@w&RMtLr?^<2=q|c^~g1-LIzaDEIthvp0TM7k#v2 zCeq!KN%4709E~mjjPpDtLnn5js`@Nc9$=z=!%Si`d2bMhmLti_keokEPJ7q=SU300 z`@{U@i}ZsA4FNu%KGjcmKlJe76q#WTVzDiPmO&X1=`K9h%&#yN6*ylR9_DYCqVi<4 z2s02fY1%U0>HgX*?14s#bC@y6yFeBH*1EUSmKJX6#`V)NOK58epiW}PsU@NSg2`;c 
zZ|<)vW+;pN#)D3ZR5Qv(@<$JBgf!2~*`4YQyXDWGt^dHYdfkR{UJ!XdHgoVFrvni>OPp;Rx#j@6-2<9pR(|MKFJRQZLZrOAT`z^rsFKWd`q6uY1%#8?Z{};Hhb1e>*aq{Q zw!qZz3(orVD!e+nG~sUzR%Nh~Y1;4V^b4Q#48vt_mzK2jHd3k760_b5pg5zCjHv>9 z&`eQ>S170 zlw=hA+b3B$cRu7j^VTez`955%@%W!7(?7^+)B0*mwWtR3_FB{rpMG@j6^?@GuHzIaF~^LP2u?XaqQltyr2YY8potxjCHPQ5?1Xdjt@ zE0jFP?z&e%d^_AwPx$JZXm)J}!_^_uGd;J@DQLgsxwjHISYPsceTt<6L-WUG9Qj(C zDl0*p?8e*@4Oud1;#{xOx=%H6jXsXgx5JS}(Ss{-mo!puzfnx*U%N$R=X4AMkxZw& zMR-Yx`pee~LoK%^w5eO*3-1b!&jS_n?R_B*L|FV3JN)N0X|h60R>z%>;^uJqtoN6s48iHP9XOwbB=Wn|*=A0Vc+00l z;%0+w#go46YG9l3GC28XiCsjvbsN(WGpyos6zBn_?F)X^dNbvN&{C)J1Nq$>PxqR) z+_U1$x?lGti1nZeGaYweg>ObGWk`LVEc>AtUyXR$}`4rw>hHNDL|C7phHyi6%ET5G;AHnNCeA<2Y!WX3h0v%!e%u~FdCUD53nhCQG0=9x z#I|_{kZdV<%G0PaBU8Y}DFkTfYCJLbZ=5a3jp_Vg|Q zQzHj_<1jz}xg9vT-ZY0>n-pq7I#ym7)Ymyaqm`c)p7jTAq`bUS|4=y`y*YmDP^Pwt z{B-e#04M9jn$m;0nRn^O{({q1uG}A)nNkAvjK&%JS>@rb0t^x_Vcx~iB;cpV>raDG z?kP9pzG5=(GI%mAUOmd=oHLTzv9b$6ewwu#489)4_uT=Tv@My4C@Rff+Y7{FW!aP; zzzI{s(#pT%ecpr_2@p&C%aO}|oS6df7YJU>g0z4Z!xT70p9k!ix1cLj!;ca;Db6+^B=9#Jug?lzcl z<^AL5L3ti}w2n;DlX?Vs$-k~zlK8jsxx|-4v72`=6q4%ecklD`$%IoZvemoB%RKG5 zPy!rj*)eR{^_Y6sf8hN?F_cnd){lqYs*c&I%71UN-n-{L>NEkmWjfTEk?!QuY8kB$ zMGvYZ$9kT#eJ&pC1Pw4v^Q~t6o*bGtgc?O!pdjayx{hY)jqNV8jVQiL8O)N$0X$3g zO`MY_#WgT0d?IkH`oFIEjgBhLy@t$}Y*h2U!dw>qT5RPSc1()Qy$oQ{AxtxLL;C)9 znn*RNi{ol#RPjD=FyF29t{Hwl!m_pS{lw07*pFGBGo`HW{O>hlLu9Uyslw=X+6a4M zY4Is~Fa&N|KcFByMSX8XF{60WKhl#X&aQyY-(2tHhxS~5BeuWZU0n@$H=o+iwb13s>u7l)c7dyDAti zHIAP`4Ajs{Xm6;vyFUhSP4@KuMLUdpEI`fAE{kYO_th0><125S2O}F?u?qt);kY z4-8DaPdUb*AU4ctw-9VqYp!8~&&Sgt)I<5xj*G_;isY}B%$?ms!*nRzmlH*2B5XIV zCK0`+2PTgSrxIx!F}qRd68K!;Pd(8_7b4UZ{TbbbS3oGj7#x*BoJWmKW45d~fI3oD zN0OW6&8i-b|7Pe%xKAHo%^Lf)594gnkuzB~#n8jqeZ5c*(!{Vwnjz-qOmT)vV;eKg04Oee6=AKH8L!ecSR0vAI!Jq&vB`TGqCMBx6V9FeRKZm< zi_E-E`O4rIwV&4cJ$+fk@b1mYR`kBX)%U*m#e5XvJeq-=uk;yp3j|tjn+46aE{A46dJ z(tqIkW*58XOVk8WWL;gCL37zSlwlu;@>Cc*Um`YS&QbcK{-Vkb8NBS=y;y3h**JTd z7M*Ozrc}w0NQU=99$0Lb7rm^pRkCeq2UVqz=^8sRHNtalNXL%N3s)b7{~Sb}i;Kvt z68sq00;#$zg@|>(jp$fdUT$6fXQS4kEqk%5Cfl)H837+XNddk^-+N;|S}JB#_K9CU z+{`|Ru+9P|1U~KU<`C>~S1z7m)_8+Hz)NpY$hN+~1 zVJufxiuScv8pVJ;qTJd+$ zJPc~Ay53sNG7GKRV#-JwU ziewvo-e|iMKi<4lu!ra@)pa>nz>N%oR!dID#*>Tns+5Y5^MSu=R#0w=RZM2&gE%Gq zx&KW}Lnlus%jN$XKL%aac5g(ZQm|88M8prXP)$11InjQ?j*7D3DZAHABV^z3D$qwB zzpPR6V;~y8Z&tiEr>|MY<0hy5KpYQgl)OZp;E`7X)efe{o|<{XbpM$S2jK(p{Y=Py zTcP=da^%pglADARirfkL8GCmR)puJfPD_#w7&be$j1XOu(;#V=0%7tT8YiAQ)BucEQdOQSRd+*NyblVAjrgm)cRT8ysmKo^ zt#&8&FYVsIi3y%n9lZUe6E}(GL>%z=G~BL0sy)&oLG-I|=dH%so|y5TNldrYktkmr zN+>V?_(yTpH+}f9&%kD*b=y*21pfRXn+cA$pD29)mDG`&8zl`g>fD{)54ig3hQsu^ zh!sJe3G&l@4)V1ebQ;{or~hzh!ZwL{rOxzk5k=vEirV>bHTG8qa(k6hcH|1Px%JZ7 z;z>2CApnl+6yNf7(E%0mI}UBl7sq*&Q^ePAZ`?*xDqnoT*;lFazAmN*XeWRFWZVzc zuNV^(TLjWUj9eFn&uK??RcbpI!_e%FanhoRBR8)zSo0RW0y*{m1*a2a?*rs)XmTl_{ie~iYL=P0Wc1`o0htZLO9oUFt zlN18P#U&pFDEikvttbH1XHQwRdMdyX%HMR{gl-0?Kt4=IW!`khc%yAKAXa zx)*TJ69xbWJ`4@`+(7lfdHP1+Fg7pl6BnuPjjBBec#k2)LzVZaQDGvMdMj8Ak$w_8wrb6bLb9nx;&Jt?Q203BamGB+ z7F6d>B#JzdHxLWfl?T@QACy{t5dD%0 z&cg;siU1m8%p)>@lBx)V0%pQUxDS~rF%eg>7Sj>kPn>30F@Tt(};v&RA z%MKvKltv*BosUO<<{$2prFv7PC_3L&2-Z(11FN<|7qz5g=vR;54qvD4AND`zjed@~ zRYfb$K{`oV>(t~%Ka7Kc$Veg=h)nF=0I>Bj0m?o_nL}=)S-o1wnfGPVq+sF2?}+qT zn5<%60!TQ1L`aDjbHSQYiJl}}7+CJg?zl#k+u5gXQWdSL{?#1}^o9@S^+RRm0?RnGL_0Z=sGuV7 zy%_jSm7(^|Y6lv7o!A;#9LrEv?;f&Tg=pmTDM5cK&VVBj#*#*sZ&MoDSOY8~A+8rm ze)Q0Shk^RF?Mr0Wg5OAUC_aR`dr}Z%Q5o34XhOx)rRT+*?a1F!aq3tdS+^+(DZL#6 z8TMb*hc;zRVegMB&*rM2=6w1#GW{dU8Z74ZPY?&JvWp-y=cr(cx5tQE0l$BzFh`bI zFK)!mzKv6nzW1+X@YLLip=!)b_h>Ydp(jZh5MZ4-ygt0Cbs5+gWVRFtrG>Rire|j7 
z=P<&4wgQo0G3049SVZJqc{Wv{Bklbv4q1TMI_+LmK{8d2E_I-05P|*czeH0Az@> zE+J5e;7~h!m!~o{scTN36D7`VBYb8b(WUqoDVI1A>#R(4;HD=`nDr}O{a}g5=}BnmV{|G~I4LbO6SUcI9gGn1cYZx zpgjkf-^5!ISkhP)m5kunOTYY(wD{hF4Dg36R|=LI=#rBNPKW3V+|jHZz5uan0g2h* zufjSakA_%PZx88bDpF47SeZUT&u>~GZuBn!y*wa@wQz?>N035g9Awr~f%6zSpCdE+ z(&8gzSzz;7-cv5jZ^$1$p!>7S-x*)iE8I^;+DDlwSo$7J-$aS%h z@_1tlfK~>jz1?U@f$3j;*sV_sA@P(lz@=pKh_+&+Z=^q|K4a?5kipV6415t)b3iHr zR-*(lDv%I}bF3MI`=jV%x?xD84oO^apPt}F@DvNlua)kxk#IIsWN8X1Y zNeS)i5y#Mi>Bb$?$m?pV9WU2NbO$=nk~aJCb`1TwnD#}?NjJApKba+dUV2od7ws__ z%q6oZqm{x9;vzx+=Q5u}A0YS(Qc3Qri2(ROJvvNrI)K`W;=b(-S#_-}t6;NhtXLCk zaS9C)2NJy@^5lnodhI|e;;u7;fb`GqbONytwy$}zDK0);9(`1~kbmDdM08()Q{)Zr z4x;$~L?IU^C9&0-eH$kq)s*%kg-#G*#(I!4_j?^i=eh}01c{>xWieRM$@7g)AdhF9 zDY>Fp6~#=s07?b?CDvlR4Cr9G>|b883*V9%{wEY2@zc_TS+M)y<9)*dF%~UqE@RIB zzJp|AB<_^_H>Xb@9YZXzBs3bGt8v^q?P>9aQC=RYE&2kSe{k$r0@ENI0vwZrKukIj zr~>?j2fSg64aLQ*!MCxe;oQSA^4unZ+%?_~d3q?Ejdx_4l1iS%k!_G+-4jZZwkNJR zq<7|n6|oJ0zKU~mt1uqn+@kx3zI$9!LmyTeq|bxQ{e+Lu^+V;-5vfqm*OUw~EB;oW zw+JzX;cKmA4mEkabz{Vsf(~(zO#6to7U!l>UZ1l1)**+r#Yj;B8-Vg!tu%YTVrq-} z<)OYlIrX9(@Nh_vG)X#+o{3x2xSpg4K6793LK$P_uA?`dGgUZIX6ng9H!qYSzqx9c z0n){or91(NS%_|q3VOvOIXy0N6hR4Eac^>v4H{HdSE`f)ZTEO-Fet{j%HuJI{|Ky{ zai5$Zn(>t3&(GImNd~wbf-w~^Pa)hTq^($ShRW7Bt{JMp-HfZ}5509PqnE?xFK(IK zhH%Hy%gcpZnD=XDEtTBr*fB|r4~L|0zHrq-(RJOweUVwsD@Km)Wjmn zaqIsjdn0dP&I~KVy6q=4Bb%O%`WS#XlX0M;+QF-Z=m)AxD+81pRk6n`ZfpqH;UmUl z*jS(K7P==OyHdo%5TTW}Y$ZaoM_`;HymBlg&d=<=`n?V(Fu>ue`j2G5{`~57?bUz$ zPm3wu&+%ht9C-1Bdbq-eEtnRJTj`U;R#bzgR*37t{bd!!&$dd||M-|Q#$hf{$lz!y z!G2q-^@BPbg>z}M!m072Wu1aorPB8O_0*xssZisUn5BS<>bN4c z6X;?cO8KalFzD%GQFZGb0*DL}sUXBw%c+h3YSIZ0f0NmEsC?96rDOsrGztOTEsug7 z8X}=(iRi)AqhlX+$c_uHNb4Uc*{G%1}g2hkvIVXuF3Y3+iBK8{0=y-#9F%yInux#kNLSNZ}Llu;n^jZGGC@^`Y|6>nPOyDGk93u{T+! zT<)4NUb-k{BWF2!f&*(dhFIK-l(@f4DtfLWAF<{h@A%U|p0Y3iPSV=x4(gK?ld$*2 z--)5LKY}?tYFbCtU?CbM0|vqh5YHrB*H2|hhI*Q`8dD)>X$;C#ri{7;iyPrFK93gH zgNrFJmjUtf;LgR<;hU;whb1e@8mi-&=Ok{=)vU}i>#Zyzo~)VOXT~#SkLJ%1DRh97 zaBdzOSRZM`#lK+0+u!>pbK?I=8s8rVq){=FqD^%IyEsSrn9nGiIFg1TzN765QuVT4 zRN2$?&Ek`>QDQFxw5i1L5<^Rk#Fn8}iftxccA43IVV4&Ck;Wpb(d?awY4`dLgFc~! 
zLt(s!)r#-@VaWsX?g+)wUd5bDr9ZU5%NQ-#lR&-wfSvzR@pweDS3J6?i70TCK6Ko$ zDj}*2%C$=RJm6z@t=^{($qVrXy?&Khz7?SWMj@p$i&nu>UcZ{wK!|S)dBAYi_|gvT z^|(N0F>#L5Nq=mLGli*gyE-iJ)QTetT+<6>I&0564?}Y&LDoK_95rriFGJ?g>VNHQUrF7Kf)CN=q(k;nnT$a6+DG$s zhM_~7)#c5G>hjm*AQO@h!$`>*suA-cV39gXvFHlTAnC_;i(T~=we|Ffd+}-wS&D+k z9_4Uv^7q&^7Mn*DkyDBx80~g>@pr<)kv!h?p0ZjFuTqPwI0GWDAzvaW`HAYp_47y( zQ956?c%MqB5}HJ`Ne0oPa88Y-iYiFFmfOIT#|&H4v-n6OdYm3*R8SeNBJ8xVwNz1N zN=})&D07bs9uXCol_8>}jZ`2Q7+>=J`ta~(NnV+DI;}I=(bnv zy4smbSrIqNvt9o)H71?z~z zA}$xA#CurN#-fp- zxCwjwpW61ifO{@P_zZKTnybQ{sqyb97pileE*Hl0_tiviV}hm5Xcpp?3s3!SAqb!h z|4$woPc&9G(HqQ%Niw935)z+;9MKwUua=t*7QTIc?b42O)QwW>H3zDqp~Z#a3j7NY z=N&2dvYK*eUEP)Bk0-V%Vaw>2yz6%F*Q4X9o}n95DUt!k@FJLwY2ZRUQw zH$_=g%}O-DcHmU5*U9{8@zC~h@XQ6Fzk9}%4`@A*#1;Y^27g5dI0=Fu1yE2c->0F4 zXoMYL_lb+d=_-frj_zKgM4o?GYrm~;O|WlF$m>yqvfAK#k@ONB%2O^XUcYhfty7bz-;pom`^AH3_69$+s; z!rxXijN+cdQ5`^41+xS+<_dTnRHJ9v8K#JG%smwSF?EnBLkgT6a+8NB;vv4=B|4a< z27V;I3AqGV(Vl?xZ6Q79XqYJ(XA_Ea zL(kT~8TuoRqzltA;R?{7dS!C6f<5two6f*dZs|;R^o@beVHkR2R%UDK>zk(h=k?tJ zsjEV0CEyulOhw59kMTxT>ab^7?U8ygDt=tz_l}GBRZ$@x4Km8B0A_Qc1TfxO0XV{O zPL&7svYqhaH>uE7{J3&ekY)MZ6FT!(y9L9DAz&JP?kN|Axqw!1lCCgN@iI5%{@OeS z!b4`{!^kjIxr(`pT)jRF10^(y$o^J6LAeFynGZNRF1}S;%aVZ(n3R+2$n`)v$Mnpy zRX|o0%@)ZZg2_Eu%8~}3Kbp7`CMt5Zs)`%6T9Ri`X+A5Ua}Rl*LxR}FkkMhzu%IAI zg)>_8Nu|pLkbwVW9n2lb*8w{xL8P%rZD|jZjVy%}eu5?`q;E=T^WhmCy0!Az86Ns{ za`D{Qsl5Ax&+RA{G(?u_HiX)}TUQ>07s7cFW$?Y&^OWAlf&PtTQ@2L+4scW_#2^OexrX?sd~&f z1;^k9}{`tSRZB&WVMi~n4IAzqS?ygkovXZ{&?mCs<7 z)=+)29(ry&!QOcn3jq2wkz~iq^Z@#m-+ha5P#}@(F$VvP0f`!A_*ujyQyhq=O!T3S zulmBKn}1=0RROxhf5KM+C>cWo@+|=I+-T%RdH|(mTJV_I-u1TmVtkk7y6!OXF z#ZJSUCFiyfGkNdNI-EVVPa8_?tjV#xJ{N#F9bZtLvYNwOZLGs;*Ew{*!ha)Cwrunr zkJrxJ&RQYDpLOB-Tt-2v71gdIRe<>f8RJ7Pd6>}0QT~K zRG%qbH5-oYXsK3Gig}0e+LN^9Rc6yu@-AD;oB(Lu!cGK9>XNxN4{Jj#w&h{PYg@0( zVvAQ##%;0iTv`VYoKzN@e{$U(QT^EzFUAG!0;P-$7^6sM8S+4E$J{YwW)6XsoPq=r zQCwy7U_;?b_qo=^(Qlkcl>e}671waPz--T`S$+i6HxBuD139z)2 zNq8{_3e!5JcEwH}P>Wt8Tb~|C4Xk^kc_|8SDSanweu{%XAl^)D89sFgZ_5)sHq^cO zj&z0s#G=yOOJRM&p(5y%htL_B9_pVGGuYKg>VX6-Kp zE3?T>{hh5G&$8-cR}R?RU3h>0z0*0_=`mj0Dk`ol_fosPO9?eA>t2GDk^K0;+_q+Y zs%l1+F1l^3l{=|>uJd;FSiYO7cU&qRa^SR8)deZw&7#C3ddSH zKNSQ-?CHbZf8RALyoHSF$2r->k<;n&PsAH=Es3961H8+ANV35cKRgzkvA7mjeH*EF z7>ricOEoKvcj#*1WcqL9aj$!4e2vqrI2?T1F3kfnt+23f)F9wtRrf?kykQ1HX^l`{ z2sUuTQ&VqsDK828ZkLG7jY+pmHH}GCore59Jw~dF-PfzHxFL4m$#o4tOnu%OL_T-c zGcq&VkYpDNdu!5oNuP5svPL20LUUb-K}Fy#)IC+C4qm}KE&;P*09hq>_Kn;mZoH>N zYkE1i`;RCDobQ6J&_Zt<#BJpoP%F2x@rC1BPb$^1<7x9bDK^zCZ~oWEvIn+*`HGFG zz4xJT+@8KaWlJ7HXBuOGji{i_cHhp5cvyPHPItBN1k>s8%biueLJe-Yf3?fSnn%ttD5%2-|o%10lzp)qR30rLt{=A?zpX)uhm}dtz zzjhaY+BRas`XX|4awPkqNkAY*(*iaf^wP>Q{u9}>UT84sh+a}ERTvEX8skkxj)w5xa7rCH}>aE=w4tdc= z*n_=Hd;8aY>@X29vgfL4I){V8Vn69b9>>4WZ|@a1sTna#YG!)H=Vhn&AwSlYHR~@v zIcMTKJ*A%)ZDabSgVjsS%Tpi=`5Lw9rqd9D1&l$T6GVKJsPz{h&qp5yt>Xk8#nxmS zRpG7GztA09mAgK%*>5+R+?rVXUzlzohW$-mn}~DZaYfe^;mgkWKLSS{ib`XB5ziwm zLfmA}v(8LnGXlKx&REMUeZal7Z*NO!>5kG0_>g$OkkRgqsr=M#q7y|fRDCLMfOc&QKZXm_}w;Im}d7{ z^WQp7NinYam-}e$_`)KDF3D!+hujD!O+Aa(zZ$Qam<8RiUQ|El*&m(BP@C%B|-{$cTG3VzRrHtpb7=E44FcQRn-&XOLzObe_&j5sa(!mss-32OfQ`GDYs zujWeapWY(Qv=E$bPtQ0A!#!GaT%fR0A@MSd1HN>VYByn?^ffPtnpzU0_yO-rTN37_ zogq`;2Cmd(hsdnUj;8lI=AG08*F1#5qjg`C4T@U3KRE_eV;puC*xa7z{ycEJ=72J( z=p6Dh%zKYU^3&;`t!-(-@7)~6(J;a5ZoIN5704=j4wo4nQse7NQPED@RR|%MIbh90}XtdYSl=1`D`t`o=`So%I&{#NQ zD$iV!_URQ3%GfD8!>2e6wwn}DO-PT@0>?rIQsiX}f~gdS^Mq=8o9Onh_9WemO6k8}{%;IUhyKI}iK`IACklu{$US&F^VAo;b|zq89Rv;Vpv%TCP;Z_m zr)@uflzgXA zF}^dQG$|K7c0u~+O3y~1{V5ENFqYOl$Vlfzd>q7lIcmVXUictQ5HdZoFe@;dkafN5 zx2XtOPz?PnVOHUGWqnWysPS|;XX*s4dFSmB5*u+0cKSsIsI{co-R)=actu1&Zhxgx 
za@Y0^vKp;`S#!n@aHIcWJlqA*C}QK8=Od`2Z$dgGX2`o%>6p0Iw;7X6Wa;O00)R>> zGY}S zchYkQ(}1FzXKRc?>?H1z&&PrWlT~a^wfBHuIcR*qx%xC{n+#I56v#F+tbJEaIcg3` z`RJCu3N;hz1Bfoi6kV|Y;I3jZ zV^e3UA1Jpq7h3HJd#%UBg@D6#vU)sN?B0Id(Y&OS3nhW=nKG z6{waGb8*C=P@Ch&Nn_p)1h~T1*lXuuwNJ!hlBPV>v=+sHymlzT(92seL4)p2W`Dq~ z0XjU|hg>$8S2*AW>ucKs9aN8EbWQ>y}=J#KwlW)uxo!ZIZ z#IE`~hGgFfEt0_W!#w4qxcXxN=|`GwbK7Xa(N~_+qDHy9h|4zvgYNpqZ8SVj9arIm z{8(pU5P0xMVzh1=G=p`<4fdg}Cm$#7jYvF}LVs0Mr@SCi*;C(xA6Ho<`%>k`yS}D? z^&DH-($r0>HeP0UWJGp<26FH5fqdq#3L4rKMDuxiPnGrXfGNtkM~Xy#0ZiB$e!_T} z&|dwGz__C~K{0OTj4ShzYRzVmTQ6;|C#oe9E!=4fclJf{|lXIn1FPg z%?Mi+utwu`z=MTX;tJebdT*C}J{mYM?g#@1hdyRbze#2&WCg3>Ex>`kw$Y21Sb!xU zH0Y^K4-fBGa7bbmc6j(5ern+FEY0)2rx3@`F|WWv4aP-hqdU^uaubgK4OU5@K*(2cm_n2fyY6Ub6x$W41OxF$^(&1uwq6 z!go}?W+-XHD@Z}9V$x3w(Ym$LAHIy$Mf4x&V$!!GopShC&hyoMdTurZq6jID77?Bt z5H2c8a_O(67Y$+DQHxI-gh0HAbQ*+YQL1Tj*ZNZ7%xM~JbQr=dWn`Fw9!bjWZ?9n4vNl>m(mb*vULhL#FDR@5(%4EBTv z6Q7>?-}tKkNf6}JEySOQA_M+!x~Ud%DJ)A|sb=QRk3q5=$1tK9Sc`NX3;Ajy5_uwL z&`n_Rdy2&XAC2UBj`>JA@_dM-5nN0a2^uy7TJQWqE^)zCNP!3|q`Qa#pGh^*s7s+t z8^mwLF)6PZ=|`0k>VSandW^IXBFPi=pv57an_DdItWeDUiIMVAQayeNL|h|^M-H=^ z>_q}k9E`JN-4E71l=vMGKa@y-$5KSyFmxH5XI-BTH(rXAPT`rj&=XHl*1)_#OT{b~ zw_?RFyJE#-F!d6Qcp)(bSQP{AV!V`+zd&%DZR&s*{J`0O5fbOr5R%L!&?VR}bnS$C z%gupK7DV*^V6gdGpqt@;5>~JaO8QMv40k4d8?N@m@)S`ovKUkdq-_FM|eF7!ks%0()=7)c}PA&8d| z(|Eh<{nH)d@8D9!^0^bTLs+7a1?c~|ViG*yF>SD{oV}g~VF4kz5Kb0byWxwQ zo_%?7CjOadCX?=OWE9xthiGHAo{?|xYPgtby%0Te>_wOC6K}o!cDcE^KKE?=ckXUm z*WmR&`>k>8b>O0L><05<{_4);>SzU}oZ*K>8M=Kr{76Ma(HzFRM&g7Lz1TS-W)a1t zuU@O?3S`QoA=^)wiPCG)@C9lB*znNb=z+HNpVuYgoP5WnY1b^GbyHe$!M%Bn~4*yZQ&}Cc1&v z_Lgx?Dq(LCyw`Hcms(L8zh3q*wOZR-@>FJR)Ok^|e%BK(1A~j+xi$YP=tb9G;DtP3 z7|4ushyNdfGrw$(tqbC&;529N-w&AzpWH~ zXWZ*$Gmfkvo^UscxutTgFPTr{M%2NlNe*M`zOzI0(G{80vik}dm*$WY%B2HRel7Bq zaK3d--%MwD3B;}|_84tb9dVo=(;+=$m~~&V?vmj^zI0lSd`Xtij?A}d#db)%vxT`t z{)_i93VU@lB-qaQl+2W}`SWW*e9rynF-Yg9v7hN-TxkNM?`6H_*7lO-r%21ELhwWv z8S<)hr0hBkQNc+UQd!m077{UeoW6NjdH2iZeU(s024Q+(BQQ-Utx8R8?ZDsHsqgiBgamrZ+X#m#@xz(X9;l8f9H;e z5z;pCELXD7wPr{vV5mKbW|$a$sNpRH^xxx8IcU)2Y6RL9lUt+~ZivS#h6eKPV1~1a z(*6wb?%loGNg#e)Q1Gd?qM! zbm#6P`|Y%Wo%YITYyPE7!%bJhE&X?@skvI2zUW%(Wd}$Ndqaa4AZ0rnyqrQ@k~@exiiCPe+a&?anbS6kNi&|DXYj-~#Q~Y;X11@%N*_yu%_{3(F<^80x_H#8b|8a3e zZ`GCut_A$B4}qz?wY(LNom{({|KCTmb~k(V|G9eGbn*=L$)WxgE2K+jPg!1(03Z2x z#R1%44(x~jcLk6FpR_MOB9~e@EOadA>~kE5KCMjqC+aYSnAQmZ0Cxlmzg1lDu*fbs&Q)`PrElOz&Zc8(Op;!)p)ds zdc0pR+H_<2W?^snZzi7hMvXe~BuLJcnxRO0YDjEilhNRxzf=6o#tqv-#~Sf&ffyKl z3dgHTIq|Uv{!Paz=m&IB0w8imLSO}q(8Xw<3~rO{J>ryR2#j+lVkqTH_*A1GjhRwt z{2O-xK9=_dJ{2uo@XeOsoC&bC=@jrb;D$;y16f5}* zcy`oRXpnYlCfh4lJzf+vnc_=jC}B*-SYczqJdq~vYatrh4@ECEL9^#^D*Xh_12KBk zS63$ov@_JpJDnrK6K_?5DlcV#8td2YSbbaMzu~X?SM9B{l9#pA;&=a0RXv{lC53)Z z@xM{w8P$w7Jgv^ardnLlOT`bSz)Xg$nXX#6@j9$TfPit^5zfA0eG;|vMU`#(tny2P zZmu=MAF0EmwtYRbjm&$gi)++`dk2%-eP$bE)MnYaOurwGxA`UEII$ITd{2f!}m6C}jQy9zLQ0=?$;3?R_DxQsXRAMrUKyvR-v;fVIX`x_&!N z4%{yctY!{Atgo(tCOy^RFp^4~t6gi3-audA25pZD>iFVZbZ2F+QL0Gr-t-e|CnzKX z5_rgTA7lTGEj)I!Cehl7MP5B>s#|_G1AlIZk|0>M6q+A?6Pe*$Hd5tW!>gg{OR^bm zL4CG;1WMZiKQj|g&q4?NXmcALcxJ0Bu-F}2|FO*llc@T8V!P@E%?CuLZif>Hj#Rw5 ztOUE7M#>yg+bjGBEe0l-ZCPDaaN_Wz@Su)|de_NDC;# z(CA@EzVp|m%8ak!g?Q%<0@Pu?jh~oN1&;~up;C(*Cb8=q_J(C}V&h8v&XEmEVwK&YKTBzaOB_9sLp5@BudD;BLV>3_ z=s#%NpV`+^j3oI{hne(ic)y-I_?yF4M(z0qK_2l8&Yd&{d80>!lXgmWph`YCU<1ssVjizy~g=uqryUxU@6q zcI^!M!lYuvjR=!Xcolz^uz^GMt0F3Tz1#Q~iM(>&I+(ccj%AiLYympn`n+oJ9(=4Z zRC*mGH9$iyw>!UT425)*Sco^%&Jw&UI^)s;Lf#G36pE|sjp%0=&bAuX=7Vd5l|*uC zp#t)Bv|=; zeHGOi5~Ks1Jtn?*xoUkU1#r-#l@&&Y5h zJ}b!e$oiXuc_3;Lq&Hak2DU9YDf|z2BVfw>{LZAL|PN7#wk`eg6&E#xgxUzaGyKX!B+6?^UP869? 
zp}w1cqleq=&$gp#N*MkTFmakQtd;WBi1#~V>4>ww7a}y&3+Qt<9fHGD z2B{f5b@QEMaYcktBUhvoLW_$srM+X52A&$ih~ZSeK_6w%En4j*>*v?#xq0_3@q<(3 zjS|6JkuW>lZ_FA-tXEfZ-hmU!@F(J6KbwsLV7c{*)IM~)50|09*Uau4dR{m~)~Yp{ zzj-}ARH#0D3)IRDM~8SPoxc%LsX$bARsj% zAp{6T2uUa=A^rE@^!I(wIdlH{*0>er+ZM!*W$f}misggBuLYM$^@e|~ zmUI{JFExm>+4q(_7QX_!M{OD$@$=C${8jC% z+}&&+<@+2;{aSVXUh4_&jT`jleHz3>crlc$|14~q8<&22?_+-dQtg!)0ae?LRVjx_ z&+6BnK2(!*lo-$o;5(W>Eu|c^b3`*k(DoCLLnP}HpNv4!3pp&$gP=1c3&~ByzTeie z?!R2iMJq2xa#V%+JIFs@%PE6Fbv3_7Ke88xLLZCh#)S6Rg#m21?#+Bn2H;OT8ATNGv~88D-F5jgz$J zgx)QRSQW~MdOpRWzO23932HjI)5y|kxlwW~bW;@9l3G@~8NqmD@Zr<#KY_p%3=j?P zPkLGP4(81;Nw`>2JwhcGJ^V?0;-~kG6{*XZ=iWq zv*wG;u|y4Y@hbnKu`RVckOfn+C~pxWorX%EuE5LhrcY1nTVzb8J7fC9(2V75;XKwQ zLAYt!s?n}(tx~Tz4T*B9fv}3QBN!WyA-2!Cn8ca zH)593dJ+dUrA*=k8JW!vkczdTi9*Tj!R~4#|H!p_?G-Ab;zg~E9u0>5lNspR^X++) zjlrE#Hrl8M#T)>8KgC3Z6bR*^n5Jg44_l`(yv8#u)4mX8j#B$WgY!{5)Ql(V-8<7a z!Q~$NI)SABI}Fa302}&~JXui2IdkpAA4ot~BmGyz)uV9t?}zfYfOyF$)?lZFqkoTB z+Vxvv6y&c|#ZeL7|Y&M;ptpZ1_nLZqK%SglqzI7|leC$Me0GqQ9) z^CC|IJPBvVugNfY(pk+3wHepasp(w34zr9~H{N93u~vnB5c!R*=h4hXvb;7;>n)NA zI_(K`bKNNb1P4qK#Qj0G))KX{W|S z6jj{|9FAv@iW*LB20!srS2eDbqLhv$kdbc{z|xIs;>F_@v`401^~3)2J>u0k@pu{g3BK1zO|~mmvbXv z?Gd5N-|-<}(z;&4=hl5zu*MClVA33alB~LTP)>>;eIcBEp&%Y{yxF*caOF01U7eW` zTHx6?nJhOizh-%_tdpfiB{wHSlq6i{L6aBh=2`I(#Y2&|q|0+^JULBKY8DjHxPf28 z?yyJ}Zc>78q7*%Pu0&Y7Fr9&}_G5~rBC7+ma0}k!XG+ALebqx>UP3=6Y0ut)USCc_ zN|lE4j+>Bz1ns{BCqnZu1`3hM?{jtay%8&q+;nUZY1;G1%p0dcf z@RF;GUA7iXb5UXA@Cw^S7;oJtk1iesj*1TB@!Raq5gpgSH9QL(D=JcYYvw91!MAod zPkZCcO_R5*lf&JF%B&}f1r|6-Hms#a+Cg{{etvD_P{}vejY*n3EmR7qJ(6)+#_d6& z$6!!`a>J-4x>8AcR!;TmX?g!|m#Lp|DBH-&;71cee;h`bU-|~w{HE)J6b~4{hfGJGYV{sC5b-N z4o?Pn^k+LF!n$>NXZIXZa#@lecS~S|-U->!=NxJgJ6ogkc%Jvfs1aiE*hT01`T3=S zP4e0&5{*+pxPu3$f20kR0|)d9$9FknQb?O6CTUO>l2obw-LZ2upy_KSBS}m4PV+Z+ z($Wbd=+@evP-3t;{$-oI_Z=dc-M%^Gm#85jv@g@vvd;(pl93dw=66kGMyDX!f*tjA zdCP-)&e2JgJEu7nrh7XU+OsY1hxtSp3yc~g>|dvjXm(m$1ZTy6ae-rUH`zAc9>n>5 znQZW1Zi&9X?7Mt~ZvT@7TN5`KN$D>ca!$pz#103vrrDP?G9CPX?A53kI!V*X@*fS0 zoKR>T9bK3m#14N%ZOvq}UvF5U^LAcM4Vi7Y%+Fq<3NAY~z$$aoHqrdH0joO~P)f|; zPQTNgYrIxbSYA9}(`;ysNhKtaD1%^p2D#;-Zk*6iA@*KCGi<`&*>{^rT(JJ5gDZUc z)_(2c6@;bAK`B5vUQx2%R&R<@xN4F?FDd@-3g<4-V5k^`f9TDtWgu zf^8=y>BbMQ3+&vI{hCw6=`j#$KhEK`)U|%Rpb-w;ZdrJb&*-Wog89vYxad6%AJXNH zAA0`d(C~z1}{jb$}5-Z#aHghp#{H~!VwS8MFm7$KLrUt7FlP4{IRLdT_ zY584>GCTOGgYSn>VWJo@8|8z@Bn+0qk{PqL-&MRL6ZDDD0M#Uopi-hs0Nk8aSwVRj zlrY#dm{JhfW)6WjUM7yk14R5ns|%pUvLTj6;2ev%sXoqOb)C?_u4D#cOyZGiQ++?T zyV?+6(_xn$lxwHPY2Avr9QNaIs84se^e_b38H#lq(t?Cj_nz@_qNE*1oq_|In|3gm zT;>(8gnKUVtse7EN)r(K)7&xA9$vIb?+B zxFk~vemhyMrqfYOv|eBu-K3j&gS_uG3x^LuM{J-;#y+6a!&QKHay(8YsB%d@xmrJ0 z1qTVkR?`nUICZznusjG?LPLexkT6eC+VP$D5-4oT!mSsBcLz5AqHZ7Ww^32q_sei_ z_@L5=<{N`=07OvZechk@P@XuP>sX^(e4yA@=N1lJW)bZhW?ux+67;gW^ufVZ#Z;Z) zgH&V<3e`{P9N`*P@Y?3MWV+u+ zMvbTEu;1a=_Ri<;g8qhMCP{qYyKtH{U?K2ZgZV=hraP}3uod_Q9^x$fnk`r%iW!H& zdLxtQ;zl4Fd2e!|F4e_SxVr=e1+ptQ`x+qd^Gr!n0>`OhJks?FXRc_5B=C?Gu+&rZ8!(777bsSiz2R1aiu?*`r}1{@jQFn(_PtypXaw0=zW;8&jlE;V%7IP1V~2Y~EREgbyNeH5 z>AlU@oCf~BhAfEvr)}JoqVk(&y)MVZFqgH{54zyX9EihLNL6B6jrW2wmrL1Cp}{ZP470J zFy{#zhKBi5cWbvu?yrZB%RLb0pOo9ca|@JTNO$pfsx$5d1%v8>V`BnqiQ5SyrQu<7K>Nuuo3T#&7<*>3+bSY^SaV6oqNiySc_ zy}3mws#Lh&`Kl=5`NTbKk%PtcQU1To%5VdjN*LNfLzpD0s`d zYX)L0>UP8Pgv_l3_pkOgi>9bJA7F8ws91C$g0tiiW}HYe(NS298{ zA|+agly9aPh*^A@Gkob_T$PkjN(+`^r5q`Jgwt&iVykO9J0k6m*%HSfWz|Ksutfto z9@?KFQjs=`fExE<$>q02b*Sts+hAl_bCHMVF$h7@8C#3b@dQ83up4<eKe7b|20;NV__l5W3Q5Oo_`quE=^(X0s zSNnL9o*cpf*A?)>#?0;=vac^!ir{UlVqoG!fNTe=>)C+q`(p@pZmGv$4Bo7G1fJ4Xb#qu&OJVerA1#2x{>q zK2ej*phAT&orGcn-#rlz(kFjI{Dh^sgq;TG;g8f(cpb1a(-}F4MX_*e+cY~GlxQ+4 
zx@M8Ghmdv0yh-eJ;y`AhCM&#~h}J`kd@_m@D%}Qd{RQX(k0!@j7ltx3kIUC2x8Ce) zP*h;Lk)-k07|GvBU55nF^pXp+_kFx_?5Td8Q)e7A`T;#zPjf7Cxs~)Hv6A(d`c_Mj@T9+)#AI+ z<#}k|PVXu?-fm}D9?(CAT@-iPwsxRMKb^R`y0^|S>*0|ma9+_4j*@nAyK#G5Y%1Hg z^88QuLc-A2PPvpw%u?yYElLFU+l!KVfZGKRtzs!GDRexJC_W&2CtY$xhP;zg#`zR} zp@oO0C$kDM-=8J_NZF?9q`A!U^VTlwmcv7w1Sc`Et2y;IfKx`#O5(t_SM_Urzwj5x z19Y=zQ^HC)CxrHJa*kcvp^|6;%FAYK1={0>mP|+RiPi{xpyUBc(@*N3mG=qxK{=Bb4Gh)u_zV$X(s%z$+bGYH!)0(`S zCZu!6M=F&t#qmBQr%;|9dK?e-NfG8-NFRuTEqVp??uZU?$1oM2YTE|V+QrIoNeA(E z$0E@Sv`H6H%qyj%W_hPmqXhMZlRrIKThLaRqQxwODZ^^UR5trb`WG|XvWI3boty-0 zqSh_d^C2qSvqxdO$L#i%Dfuy}Q1MIHesA;-khJ;2kWov#mnjS_CMt>V4f8Z5P{M() zyS;+&Gjde2Tt#=Sd z&ASCJtc9^vxJPqi>^!3+5?!VY*lZg4LwN3QNctLoBHaJB`Tvy;K3Ye> z)k8l?8-IDLC?&5|-LkxQH7qWI#VKc%Uc1sB)vW_f)pnK1>5o#9d?THC-YT1lk;HPu zBdV05;}Vh||0(9ySynxCiB4OM@@4KYZir_<)2gf`v(j$-yvzZM0xjKPA$0(lC6itr z-^UR++yJR+7vdjD^7OuB*n|PtiX+5&G~Hk9bNr*n7~as;N+sfx?Ajq0O7FFvewZOQBc^GV4vn=pxHIBP!F7IAkj zeG=`^RZfEAmPGU-U-zV=g?{I}fCFHiR+c${r?#&^0^q~&C;xDs z`75KQzyvn^FN9i?cVo3ib@EZtFFTUlRWLHLyNl27&nB)~yzqq_r$7kF#WD{##XmpB zKDoV%Q=PQKEzFwkZSpGKeW}N1rxH;oh<0!TG5l1J`Tj_6v}}Yaeq#eeLstph@S}o{piMp7V4YT%u2e1PLa5CL za;f(KJeDq#JfWmxkm1cJd@#DPAy;*4@ZBh^SM5aLZSc^=Fw<~3==qDy7wXj z!eUa2S&614m6UwPu>C_19Kz1Gzu_SqDqG7zBh!P2Ob?~FKRgYKfn|+7Fgaw88dOgk zn#wM73RKB1@TWWYTD>gh-#2(N@*ys~nU;nso$(9D+(c6~C}#ubn@u_J zmxR822Q+}^u-0VTzSQ9Wsxz1W!|f0%QPtaPdsm^g4R~75*5yD-@TRU~pQ4O(BB(tG zOV5fJBRDO^d3O!PQ01GNOhjvqZpqN{py(v)9K#7R{$URiA$GuecDnFHSEkXEG<8rG zSF&WrqZWo7_3LK&jbqvfwk{+M3~dv@{zaVP)-W~BIc@(#5b}B4Afj6nzmSg8*DCgL zgMLGz6j=<6q_UXS3NnphL|igfQzd*GclyocoJqAhE|tYxNZKpcM62>Eq*QtpiV{9> zL(Saf8d>HBxtD&t>6u;FO={Wd==;Ex$r&^qVL6Wxw;*f9wN1%O`1pHA8cv}nMl=ZL z+ar9&w-hIUY_E<@l|dIneV-cE56YU>Ffl*2XHG_)Rvb5QRt>tPs?OK~G(rF!>Aw*T z&!8XUNd6ycYxufXACD6ici~H7EKWka1hOeViO1-VeQ=Xk@{CxgmhaCL6nf3vCi@a2_z8p6B@YW7&T0kMJDj_w;W3?CI z9Ro07I_oNf7<;eZQ=(;E*7-|UIKsaB0whm01ev zHKp{6#3;io95{flw*06*zyxpqBYYM&d)fI>^SEo-N?4?mhnMVJ-UD;*V7e9kAv5(PzvR$u}6f z%xh4dR)!f(Ua3fXg8WbhH)CQrnRqU4tW3VT6tj~ImO5hhaz>kKwu0Heg(Dq zI)OA5W0$_Tz&d@aTf_{|oLi91?Rv0I0YfUeK5(G;o@W7J&Ze6*s+?!cAdAF}moBKH zUxwnb$y#2j6mErRu~_21_&JtH?V!Y0C0EzPviqKt-vKQRv>lwlcUn0P|L99cxb$m# z7ubh7;k!>P@AKg0sif36HcSa?RQM&#bnN9a<*zE(p39&AdJrT$NwC~kl?Q40Rhf*sAN%MqR|s*UR0Et20A4)2X( zEXvDrU&#jFw?CvDm9dFn3a=6c0p?+{{IQqnC5P{0)?aUY<|a(SZ8jUqIAJ$WbY5P+ zsDQo`A6sRwCTd?pUaPs;#n^yaTVm*FcN=O~7ri!IL34v2D&1lPqkTXXz2i7~Bo#O= zsQSGGxg6H^8qVrpb+ozWz?aTGgB~ve?*>O|;15FrJ9~@03Lm`ji^CK?#5CRu6eCTy zk`W%`jU!DQRBH*L?&lvyqvm_kP}1-E-hh{j0E@y7ico@@imReHbtH=2fk7N3IecDj zdrr2mpFYv~(V(nUaUFpa1>M`YbJs>W`-4DHv>hgTYK}@JRKI20>3qA6>n?r&En|yl zr3hkk6)^*YK8dox2vY4EblPIZ<`&$u!g9A~V%VmG3r5N0ams z%~L*$zGlc&8l9VQ1|7=eKJ#EnM21^D>4VeM-n8&Zf!Nz>I1`eH-VJDoH4n|;r%oX) z9{nW-cWy@oy$|;eTs+t+BTHj1i0$DaqSdlZu12jO-D|O~bjMu|jWuf%Ue(57u<;hF ziiz{BJC`96P%so(s1~8WHOg$Y=(|97sUY|x3t`+^UDtA{$n#D_{wAe5pq}`5Vcqfl zHXT4iN^8AN24=un1FW@^J326g5Gq0amI#8f^WV9YzYD?tqW$2$nc{w>_UM3?hV=)4 z6aFu?yHk!)>`}!cXPy{V86N)eqnA+uGJ>t*jwuYEZDmb8c-M1lAe+p;Lg89Ecwz0g z|1+WddO(&Buzrzy&WS~YN-kS+bZ5xaeIk`09w4Oeyt;Lu1$eGDyP4wIX zm<1W(po<9vK1-?cQa54W@nWh)XjL}m+2#(2a^c1IaG~#Vmn}69ggUtM6d>#V2XxhM z8|!^)KE?&WG=iAbg4}B)dy_9Q2`_Igd)&Q{A-MaXZw{n9y3V42u=uch8#vPsh7x$P zKxhV_!YxD-op*pQMBZFQ*mwKrlkBNN{;Z+6{BWe(Q+Z;7O8dRQ+snlfn>F1W(D|CP zS(Ag+H^spBfTZr^#Y5Yg`V8(oV7BagV_MtYVl2q*39zP|r1VzoSl8Dhcylgt)!#U~ zN*n*3tBVM3felf3a74hAU5~3wEm?81xbz`_H2x2@m03(gY%%UY@wv{u`yEQ@kV%?dA@AvFG9L>Srt)lm!UVL=yp^Tu%>;!R7skqDt-s5thER~+2F6lI=PcH zaUyjL!!xduATjf;y?W#%@(s;p5Mf%g<`A1)~Gj<`A~b2Cs|gLQJKY}gZe zOZydQyq8Jj1X(r0BzmYoB-Arhb;DwONqHvU*Jp^}vc 
zMNLD`;(k_7$gbG2c$aB0nStNzER95+j8gnlN%LP%MQj0J6)|QNXjN9Xusy0s|ysT2x+^3L1|) z-V|)=(^6!e*n#2n*q%Z8|2VQUe$&4_gmHXXI-@yr?Ax^|Dn5@u(jjQ&5L8leJ9t%Zvu4sR>%8@@bST=xy?!dH zR!P+njvx(IPpw-zS*7)z=^WEPD{$u4|IYnoVt;}&HbRQhe9rwL!s!Gs$etBb zvb(EGvv!ZiPbnIh{W4mrvp~upv~W;7@&%a8`5%F30&r6$08)LW0E;}jbcgU9s`SYR zXiZFQ=vv6V){U9de|d}KR1ezh+9*4={B{*=!&vevKZWDpiy4?@t(u1Hr`vMwA2$>a ze6y}&*Dgm9hmAvubEGHGs8KTlpOR?N{#Ny9NU5CLCRFoc^V-ee*ZZV>L~0~P^gePZ z$PWbW3PXQ!FH>*0McUuE8ST^x+Viu5UtD|byx^_1(=<27&54_bFM+_mo@FM~Cjeky{yVOwEA;=M<#H5u#5 z(=Mm3byOLvj1&4Xq};-Di517gcPTDfR&_5MK4_^Ab~?c?Q}^$bTsZrVqcooIzs-!= zYcSM8OqEj4x8RU;@#maoi`h%iCkh%(jVKDi zLtplVoJi9_vwSYzSYEIS`mRKK&EFagjlqCqPO|Eo@dM??Zpq`c(Ohai*c|30THHQ< zr(4ov+8h!;^m$X>7|nUN#+>_b3{<6&(Dx(ygJrX~{iRW8@VH&yCQZVNwLL0}4doEC z!zrfwY+@#3=Dp2D9T-HNrn-#49z81`)TUd~KT$Pm>u)tgn1{9un6H^>rK zSqK8XA;Zzo^D+4tKhTa>|Ag&bnI^mnO&=(p-;6}YP0cmP}u#+UnJm~j8%d$n+dlq>Z2u?V8#%?^gR z5Bvr(9a)c7kEdvfOcvzdM48#mu$j5MDg?IgC@A9^6M{?2KBj zR05my4hG8>WU?wPiK=xi$I_mFKN%Qm{ya8|_0Ty1Xr}(>cF^w?pMm1C>|J$>u)|AM z8zK#M|E>M%mj)C7A4j1<2$2LdncT5U@~BxWOXHt#WWT4T)AXMS4RuiMU9o^l_+QFc z1AT$jO|Bhzo%fo5LR#(t_(5Rqa7B~_`?muasPF;p^8Y*~_pKm|GylP0pVqfI9=|b}f0;J?EpPxj6wrFQ1NZsg4`Loa#e~wQ z@=RyAgrP}*WJ_+=x|PYSNGTb6rCKqQpxU=kk%E^zQpGR09XrpRBFm3Re-QOWkbIa( z>i2|7V0@;WnrQvP4s5%aV zLO*R(1nfsGl$rZjL>I_QCEwh6|JL5&%3Fo7sam;a^$|s_4WcU zNi(|m`y&P~hl2ZchtLLs^go3)FmqI0ZE>v?>@umqeQUUo`yo%U_@NGVPl$NGucv$F zI__)p1Lscy@M3>*0RK1yR3az3*Cm0CLkqvQBOTuI3H zFgS^0d80f=H@>{#b6gX*WiyYxRPp^BX|pM$IvKkU^R50hu?N6`@X{4!7$#5kQ~Gpv{_~O(!*ZK5Dr9DH zaET%|gzx4ZJv8I+jYqv^$t?_g3XH_)5m$4% z(u@yg64&q8$kMfEu(~F%&$qkK37z+ZmX)N6=y|Wt`ZQ+)DQ2ds;&bvhMH#p%vAJ9!H(>^*pzSbeO6>e3ag=iU^ln-{{C?dRB5BG3yK`e3L6cx84l8SC~TYRUglC3 z53aK}pruy+1irTk!+exk;91YT-q-<(=(cq{m~w1WjN6H}Y-;(fLQ07j7CJAuU-FER zkWw}Fh~R0Rt+t=dl{IymPDGV_K6!NAXqPpVon7t}oSajT*r^SB|N7i=`8{!9_4Unh z&EKX#=-hipJlTTMN8DSXH+ROD z(O^DYnMI^LaBM2bAP01&y(~U97Y6&D`m!j?)e^eMHEHsy$Ek0tNE z6A(}F$zrSS^qSFA!MW57Qr-u-koR|i+x{+XF?1|>`zFb$X-8Jevu%hVz&Q&YaRujC zDL7YhVTyix?}&0MS-be%*3D`KY)-H}K~x0l&?b&C)}*L$c~|oTUrimvpTpH!=lN?r zZWPNCQ#q5~XCUKS9My=0=dSL>Y%B?$G$)v!fraK-xC#k_3rSeE*g>aUT#TP_DeEY- zimR+Gxl)}mXnb|+&-f)`YM?gIb9#tQ3kaS)d)4Y^C#;TMfGigbkSf%AWP7*=W#!%HH-&HERSq=GDo$fkucxyJoUaiAsz5CYwhUa@y-LJKq5o| zWW%t*BO7nPpw%?yE5c=%J?U+c>41$LHxw884(r*9s0M*E$;4GNqvo7@+j4Cli92)>mps3rUskd zo|LYYBg_W_<87&QyGB4fXt%EMjk1(0hv6$(pcliN7q8v-yG;lp>*qLjwU&Bne$&;|H zx*xM*Px`gK8F_=rxd*)^FNN<{#yU>1{hZ~6l2{!{xuS%#uN?*SbE={+eX40qtoHVu}( zC#JpV*-eDPaR-Cd`$D_bUzQesTDu%9ce`9^@CUGNnVjV0=eq-*O#o|DD&6v>UUlhd zXT<@Z3A>>QXjryC)EDRMm2$w0H)({X$RI@%%2-dzw+A{njjhy#(zaL3MN%}YrAxIJ zAPpiuS7@F_8?weal&Ix&CmDlpqLr5Ho}EU!W~!_5i@(2!iesm<6@!|1@=?F`Er1j0 zLuY&wS4MYaifJvqOt{OY4i?f%7Uah5b-dI_4dC>r$XYW4hJQ}be)+O(Bj`u;rK zx&iN8@lN{9dZl@gIUpbn^hwsx6WFB{mQxcfzNsgyYX-^F$^GgN#q#^ghW#M>qG;?i z$`MQsrLu?(jVqvu-gxPLHPgw={j}n|@rOSQP(=b-NWjSB>qIe7Bme2@5o_h|5Rv*s z0L)<|>X5v0jNtQsR<}t_KpB7JKYNC(T;);UieJT7a~gmO{KLGW#Q%UzO#If17Au)} zjVcq+n`!*XAteGMnU|yfv+d8yRs6}o1k=}Hh82A9$6yCoui1OS|M;vNKr*GfW~Pmo z)&E)iI&1-~K-&LvCw}EB0B;4@j(qhT0*I{(|Io?~RRTkJt?S=uuGHmsV?zLwBVa7;cd&H^ z7&!ax#D6wS0j>h5mjDNxuMQ-Cv%^tM9xq!q(&ESfof+Gr9vgr1=%4sO_aQLu_1>4^A#iji0%(Hg9rU$spEy>{hga@cLbkTLyD_)HhC#>{kL_J6eAV=&s%abVa|3KFan00Pp$h|7>FPe?j{H zk08xPnA7z-)7i>pYJN|_8py4Xw2-Zk^q~A)aJ7v56@gFAaBr|F^`UVLbs{#$%#xaA zoiogbOyffH!%@qJv6%WJShs5Fo5lgSYm8rsz9Wif%U&X}4KlyLN%X25x@q_A_O zTP@RLl%~>~%n;;t$;F3C`(Eew8ssc50qDYauYrl_%- zW!=#xzh_vrA@c!s3jc;d-XA4yDQ8(1i>9tehOpN9g3`B8wsMz#^i4tP`UVP>3${># z-8wXoH@c3mPa%hrD8ogWTPe#QQ~3!04)bDXI$!Bt1Uubx zuDRK%i0n;1$@F&0=nW<_U24d|1ztF_aM`NDai;w9+4tXYU8ppP$F4x6FX zKUKYK_MYg%<{vKu~c zy<#VHoc@M2Jv_&4EMdJIr|Xek9 
zSk{%(VO;1EBUhg_(gf(J4A=dc(^}g`CIPL(`ktcSrqrBM!THnaq~sz`NLUJz-7p{# zJtNf=Y3qcCZ?|0Kc&iX9pU30Qo%0pi45bc3g6W$fotX=+-Lv9l7Xs$kVyvnp7YLpz zxZEV3lZzY1%Z@0XLX)&#n0D*l9NN1ukwU=6Vs)LTnNwGY#qLijGw@qZDNmwmhXxt& z2?+H|TqaBK*!!Uvrx2RC(UMv-h+;L&8JnZ>Ko^A?GqXmBmFu!;%?;ndX&tJAp`5KzVv!nWfe!j9sF#ll~BW>F~SgG}%69_DOBXS5PB z+z~y=4((z3gkI_5l_?=9CL?~P4$)O3gDN61NWcJowTJPF!=_|t_y{K`4rz-0xWua+ zL?46-RqX-_wOHpb%uFat=WZB zQJBSm|3q3WWRWMbnR8!4ilKqG9CvbA1)Y6jv!X`KUa(M+pgdIU?|xIIamp4pPc>KS zce5jYtEs}}mYQqk8E(Oa($?mkaLrWWF_e8hg(4&(eU{#^57R~Y461}PeLuXC<+-DZ zDHK6b^V>P~YZ0}@*~{e=Ht}0w(-j_L?N$16xM=6`MGoV(x_Me{KH8d23BUleueB3P z^Zp?*of&ysi(E)m9>qMuKA&yR0zG+O%FdzlWlTp3_a++lmm)lgYMR7C)Oqv%(7rX$ zcm)dru>)iSNT291do+B*IyOJB(f)ggfjfRMJ3(Lu$3A?QEBn ze%iVE&zkaD5nO!g>o9Gso(%;#`=0cn$kVN{(VVL7vx@X2bMHp5xU0ghNtwk7`K;mF z&0?ql7?v~#f2d2K6mBchS>6H&bu(oZVmK@0QsZbqZZ{wN@Mx+*n{uua$;o zNCHRD>QiAw$X#e(8Q(8p{#xH0Z3RqzD@9=R$U4sO4wfcQ6XW)^i;Ki>Q<-S9(Iv`2 zI*Er9U%*SXouPwiDf_C-(i%_t{OW7qeE)BQ#p{i$QLiX8i4T^bF#ay1YDB?e^C)pi zMjccslLpcuQZjhcpluC@mLalXh(41~3+`j+BEq#7rWJuU^%#L)v?%}|p|^zciQV`s zgdeO6@2ia*(F;>g{p_q~*?(}OX}|bpOFZW7AZ{#2R(hw)jpa)Gd>4AmLFj;XnP>Zm z->N+omip_S2GMEDXIlt`OuHQzO^e`YFyYF)m5DA2_&(>L#{t^m;lmGWt(q|2tQQkd zJ^r-W;PeUg-zw@Ad(f4E=7G2v&-e#sl>~9sU0oPF$S1`iT=SRw7<#FYTir}ic_{t# z3R)?=)Y`$Xk8RwmNg5vnSNuZ7crDo*$598yvY9g{xQsLJ1AD*RupCf_AO;<()MUa7)oD-5(gtM+ zPrMu98KcSi@a>XL{WyD1CmNxTN_r9rt@d*uOU9m^WFdTz*)&k9*&(r8VS-@g9=?Jq zpuYY(%j}#Q!e4B@%d8~OoAH6Zg&M;KXS)`hQBa=@lv2uNykQQPgRj7|SvQqh>yUys zKI&~I^z%p>mSOHHmHfn2o8z3I7J7=38Xa}6i{$RHrw75&Vzgi1I^$?S-W&&*75?2c z=wATUijC-ssZ=?Duz-!e_6P1cwFtnumHp=X0O;)TKSF2!cd`H5Ga#(L`t|-_oc(|O zIH^$%Q`Bd7(&G@@$Kvu(_3Br7Z%iN41#YYlubMDHjef|o;4%wLoBGiFBU5tJl1fC) zw7wek`-|gh4!oMI%atQXdez~Y+TQ9;(p1ylBZFyV6Oe+m+!Tww+~j>`MB>ucO{2%3 zmM1F0Pqzo`w_f^jI%`5=*j;skfzh4psmuuhJq_5U{!n|tPgHI?XsC6@lNuNxwx`%v zr8f!7L5%$vrXlj@fn;r6P{VoOs_yB%=q$GJh~!j2VV1PUv~u+{{+!QKVO&Mrq9rDt zkhE->vw%|TZ_5cMB|9(PJkx74JJvxqwuvLvn2=3crqd=~!~K44&Y9^O>J?O{;A<&q zLwNfFC~8T8=htu)`8)-G;Wyf2#nJVvA)4tf8>_idw}&Wpck?NzO!yFmGcwqG;dX5X zxy+r=!ATw-cJ>>|+(_N8NnHSX{^%sjF2$cX58-_1fDC2Bf(yG|FfZYHvEZmNCk6bH3D+LFym5 z0hON3u%2UvOvG2tXOJFLqPWP5IoH$tgZgfrfxYkaKFXP}ZlOh?f>Ua;EIFhH1v~%sx z3Osh%JPj9_$c~pW;7cA`ZJNE6vO&SV`P3Io-X%Fy%Ek^SZ4Pi8{7NYS6xhlx8Qe2c z;JUfng{4@ObK>(bL+U<&CEzD*h6`8HgtsJ(aYnLPsh<@?Hd?{D6do#URD|EoPbK9G zoCU4|=F#GTgg9&uB#T&+|2QZfX|u@!<`98pm3m{CalS9p=ajOzphdl==WG#*wy0%e z)sPte6cuSMbb$vB%cqEM2^n|<=;41hliLz7nCz&GONp7P?qDQVs$hNRd3uRo z_U1R2zDId9|Mz#U4f9PZa+IakRL?3>+CpR0Od_qO&R0h@0v_$V|2bj zVIJ2Tluj>z*(W~R%yD2a%3#I);+^E?oZ#1i3An?ak8ouz$(`7gXP5-v?!9=01i2ro zrfiYPlw@3rycp$^blOFwI?2ii*Jt|Ke0%v;#GE@6zz|@-wQL3W$$sQ(drQ zE|;h?!B*4-kKDIM*I|6P5lBqSH%R7D20$yo0lsw5**RO%;=$z5l)yWt#kcO8_EfY}ehwVY=LZg_ zO?C`3Lj$Sz)pZk8c@ev~x_^KQCCs3$?-3KVmS9~+C#g4)K@{Z|Y!&HZp+2l7nSc0>J z!Q1eS!eL62Csr>QqH@a=m4=P2RnQ%anX2@O@!aRLGo#?yHlIgb+s+6 KR5wPwMFW9yX)Ted$yjjH5EEzuPeS? 
z@gu4`IP)=HWQB4uH@m7c^XlhD?ER}eH5#4h^h+)@#mTgU=1i?UGw72HN`GE~`5sx0 z!|s`h{>D;qk4Gu-wDZa&@K+A{w^}R~WP{to)q+`)og5Com=Vk4dlo4W2Zt6j5kk>zy_kf-b}ALS3c&oQd=3g&zpOkg>7K&6`( zg1MO)E>!W>g7TACOjd{L7E|+LZ_mo5l;o-u`32ph)-u8k_kG9BVF@j1dQyco6_9qB z6#n8`66GDr!Z!+p>$bxeb|hJ5nGfZM(_$%9Dko)+c^A7IN*{3Q-ur#`Dc3pIcg~Of zW36jlN!G(N=N#i6_Zat_W6t2u1}sLG5D3&er(O|jZq1Dt7buR9$mvUxjxIMhWIaiV zlhA;%E$@P<+6n`76_ZRtl`(Lr=0@hlUez?75X6Qrm*)EG!J6i|Df2O<(+gZ(zO3hG z)-Hons=tj4^~Vktbkgnwhj|GsF&sqhdm4IqH*zTqb4)tr+HLj4|_~^ z%>|xnmO0Xsx|fx%zw%$yJKZ)aNgXrl6R~g9eo69kU){IrvZ3cW`T4kWY_~f$I(^wO zC|KR7WT4OE5JET=zg-fm>xAocAfD^0$>FI^PcLM@cQK9&gxYM^oCax8}cJ z{2rv>!}Q5LFV>SCvuo-3%#L&+=nW_`K!aDVJdqOFMXl2ya4kzCKnVuLsRe%SFSX{N zep?8()Kz>~Us(Dq$z&Z3?BO}nVU&TZeqZEU&`S6^%0}aaiA;>(68$jR^J6Pk42{zv zBXD0|hu^Tq?eqmFUX6M4_3Z^L_bTVPVpPDQxJoCpM^D)qZPR(DoUyg$?b|~uw3>>! z#St^AkRKfAuLj&}@vl-`4j{Gw(#ifk>4d&!$0ou%jKI}#j_-R_!eC*l2QSI)oH3bRk{2$1*x79i=O z5ioKimbY?R4Z!)26+jmHCk}EiZJKo7nW*<~^*6mEJEGpDe~nHr@1U|Me}O(7 zJA&(LrM*mlRC@&mD5w8Ubo-Av{+A>E|H~Yk8A1z7yeGLlal)5E`!+p>k^<#ecpko& zV`07{w*Rd54Zlb88wcmoIUGki#R4=*kMtLKn}6#q6X0?qQgozDaVY*$=K_#S2|4K}?%9 z5yR;o(3ruI3k4&jhjoWCX1Q80t9R0jjXf)UWjHI1Q$d&tqJ&nj@xmn1*|S2ntlgF! zKu=GBuh_)NqJ!-@PM%!Im@Cd6kQm1b5R9YAUY1JMoS=D~s4F1UD34*eY#x=X;+crf z-RkY6c5g?0Pd)M$Q|w3RSbYBomN**uxR2FCyLVY_j4{-+Q>8t|prQ^umYCI-Xg7XD zgaU_y{5*;z5QX; zLJQ1!&KI*(Z{nQkx$x>~M3cQg{4duOTy9N`)#L{`7TDGK$B^f$X)jqWg!Yw;A8%1DuTNfDQZ9Ap}! zd3D5Nbd(%ZDGOVOpUg=>JXc0k9&}BqU_b8fm}SnqzG;y+W?I`)Q~t!}BY1HxEOo?x zB*-tCFY#l;PL%9RzbZ~9d28m7&hzoFj`n+#H{J%R4{a(i%Z1lO2a6-*d(3-|?@!YyS$U(l62ObE`2i!*7j z1$&>9d|cTfQ{H4NtcD12YGef5`SqjiH6;;E2cm{a0IMRcA!UZ#Ygxz++=d}oz94JO zHx7kFVJwR=B{m*0JpbQVTin2X=endWo2|u-z97|S${1)fQmMEb}LVrye5&M44 zvEy)wz6v>z#AvJZ@07WhQDh3?JH?SQcNn*>tUgq{42jv4;QK|b>Ha;@!tv3p0oi9+ z6ilrt$!gA%88IadiMjS%hn<=eu`2Z3{brTEUFdlhb6gxZ_*XpUh^VmT#KOMku_|&ga2zJ?VaGj<%V=UVaT6`uL~3 ziN{YOZFGSZ7KM2E$b$F*>Py9OdOkM(_^4kJNT^1R`ksP)GULMF3Ez_|k897H&XQ*1 zM<4p@&yNI|X8C!qhM5++cJa<=kW89PFQAu)vW$lVqmY%%c5D$f{^K2Qn`&3F(U8=% zCX(rETu+GNNWGTeW0r4(B^l4hDrCOb_->ga>n5MP21!Ar1*|{`Mu*REVGbqmDAUW= zY>?f*=q!jflr?G_@ZMq}^QS49VoXLjWlGGvgAn1$)bj0f(BHb`NXd{+Hj?PdPu}Tv z*gc4!i3-{=!@M0L+*HcDILFj4&UyRvpZ#!qWZa$;=t*-db%5|?R%d>Ao&3FJoV@%x zYKhOd(MNW(pI?$|^DlInzkvbGpz%a1+PK!P5j2L~fQ)0%e(Ymcu>|DTie)fu)y5NC z-ub#EOd0I=+hP5oKY0zdUhA(rze&_1_*D-_JI^gCk0N>g=PaijP`<+Zb`k}8mz=-F zJQkpHBVg9EK$p{hNg)0YF1P#7v;H$*KmX;z!~fXC|A#h_i=HSmPXw4JC9hDfXv^Dk zu|u_DOfreao_DsWoeQ|xFEFf6fSV^@j)W!muCu@YS-9WTXiie!H6>cj760sCssQ-! 
zw1P7nFW}DHS(AB`COMVLuEIxREegYrSo8Y@gh_UJ zhNO3mtiA;x&Fb+^`7ZCwY{cF31P+yeKk?mpy@#ZTZxf6s&eS~UyZG=$M2W?nBC}oL z>lhodJoI0Sj7$_Vjv-WgkGU#i?4GN=nEwUw1u9PzKDc83kaSZLdF6xN-OJ*ll?^OqOs&Qoc3q7WIVv`5+0>dwu0P`(V{mNWC0*;X9);vsJy# zm5pRggWJ9Q1J=3B2m!}cKyZoq+>BrBsgF`lAj$9ot@eLy8Y?t%U`03Zh)dt*_39$q zY!>>c9@J9nTj`^*T$FWP)NRqDdb(5Bsiuj#u8(@%btOAJNB;cnwnzI8ZOM<15bwY5 zKJ(jwQ@|zdkiePOIg#_b_^~h3k2w{-+3lurN%Z5QSRHW&gM9|9>soKMNZ33|FD$!LgzV zm-hsG1s);0cpL|;D#UF@+vT=*aO9QhyxiG zd_*#_g++(=($;OgoiVq}rtH{LBKq&@h8e(f`d;k6%d@>2xF%^`jn5q|<>IT^>1%6k z{H42oYb{HD3!I3%efSk{GXc~{2!&+u#LG#aB4^gTjc(q(Tq6b;y#3-kx}GLI3X`0< zI?j6ft>c@wE!?sG#=ftXchWUhP0i$AQv=J;Fe9bvr-m%mXg@g~@{H zhGUDNRl#XTzsLHOj3#ovHFd zKl$p64~||uqW*2~o=p#az z{{olEKhw|KH5;ljwj#w-S7lbgTI3xB6qDh^C)zh%r7a{)?c!o8odH}A=KnQ|a4MosEfLCyE1@OoN z9>5D1gfJG!K+9ivfeBX-z=zo202hQXM!kg;AfvH`2#nE0hzJXY(Y%Ee7{nAAvox1y zK7h165IP&u5APNSP5-WW0o)z|T=(FJ71>l*d%IINifvkKKBGcz-E1q|mq%Jo6Zej+Xc_l4wUWN^T=!svN|@8dw_^TtJ> zQGuVIpJO;e@*|QX_I|nK4buvvKlH%;fm!;W=0-OE>+9=y%tvn(FfdFLzqc1u>nFLg z!}>rlTNw^~Is^-4Si2K0#}E-ml$$VXmxRiPcY%AHn-s!IfV;R627*~S`g^KDRp5~> zKFrGOX)7$KF)OAGd*CsN&y&Z?!~AalKK`uw{_lA|AMfq{%y$20;MqN46JGtO{`d9! zo5KG$-gG`$tFoyq;@?5w!4S)8>({kyLv`hWa%NbFC#RmstUe=ey>o>^j;`Vi-9>X`Fg3yyq7{b~ECqZ}*dpIr3NkZc@l%ef@Uh{HOZsbXO%` zjn~**F;gdgeR9~>&<*RkT5JD&y!Ys5$nC#dAEa)z$$h>5h0TBafAg=t+*DtS*1S+K zgr&ISU6U>|F|Le~xol?oZ`F1CirX^!Ufz=qeA;CHi#;(;t?p=_;O%u$Qqc>Pa$H}@ zuPfga=JIHjRo%XO?~5H3y0@5XEq{9ZXEJ;3Hihn1o8@!DT(8~R@bBv1bj>fWPyd8+ zrc~|^MNha5@7dtidriJ9rn2Qwyw>OXQtb)zjDBh<&C$H?o&Gak@bjW+pB5KI?Teiu z{?7hR`cnx}E~S~PlLPleo#5`@=&_^dMzUPQoqMm6pH7sO|Hubj1Es+g7^coqScWyCP)?2PSZFAS8>)pU3=Hq{Cy1CEUGWX#R z;LQ#n-maRWnHTRN{wwjKC5FF6;pNmurKcv{(bQI+Yso z{Q2)shaxmuST{wUxU?xE>^bnr>V^BXuH61sS!tJ9;J{97%? zvML}&%fn-#%2fqmYX5L)BiC)cfED3OWm~`QToER+re>GZ+E1*G-aBe_gpREGziO7g z^uMp0lVrPePMyE~?nLXx0KeZCt*cl4UbSJr>!z3uXAjkXpV?hAHCZ#ZHuuUZar9zs zjS8%Y>6@y!d@YNZ%f=|D>*bTWfGZE;y&&Cdm}?mt1YlX6+43!G#24|CKUT+dqX#ZS zMHj@q-!mU?#kr$5%v=Uk(H7xVWm9sxbRtbcGwtaecrZGmffK h;K*k8XK^*D|NhToOIkAVm(^d8c28G7mvv4FO#tQ4g%JP% literal 0 HcmV?d00001 diff --git a/version.txt b/version.txt index 6e8bf73..0ea3a94 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -0.1.0 +0.2.0 From 1ee31ff9b18ef984860d4319bd203e312cdce68e Mon Sep 17 00:00:00 2001 From: Sun Peng Date: Fri, 8 Sep 2023 17:56:46 +0800 Subject: [PATCH 32/34] feat: add runtime diag (#297) * feat: add runtime diag * add diag_outlier_ratio --------- Co-authored-by: yingtongxiong <974106207@qq.com> --- configs/7B_sft.py | 2 + internlm/initialize/launch.py | 7 ++ .../solver/optimizer/hybrid_zero_optim.py | 16 +++ internlm/solver/optimizer/utils.py | 3 + internlm/train/training_internlm.py | 1 + internlm/utils/gputest.py | 109 ++++++++++++++++-- internlm/utils/megatron_timers.py | 25 +++- train.py | 5 +- 8 files changed, 155 insertions(+), 13 deletions(-) diff --git a/configs/7B_sft.py b/configs/7B_sft.py index 027d216..cbcd0e5 100644 --- a/configs/7B_sft.py +++ b/configs/7B_sft.py @@ -56,6 +56,8 @@ data = dict( min_length=50, # train_folder=TRAIN_FOLDER, # valid_folder=VALID_FOLDER, + empty_cache_and_diag_interval=10, + diag_outlier_ratio=1.1, ) grad_scaler = dict( diff --git a/internlm/initialize/launch.py b/internlm/initialize/launch.py index bb34603..079c2cb 100644 --- a/internlm/initialize/launch.py +++ b/internlm/initialize/launch.py @@ -98,6 +98,13 @@ def args_sanity_check(): if "valid_every" not in data: data._add_item("valid_every", 0) + if "empty_cache_and_diag_interval" not in data: + data._add_item("empty_cache_and_diag_interval", 50) + + if "diag_outlier_ratio" not in data: + 
data._add_item("diag_outlier_ratio", 1.1) + data.diag_outlier_ratio = max(1, data.diag_outlier_ratio) + if gpc.is_rank_for_log(): logger.info("+" * 15 + " Data Info " + "+" * 15) # pylint: disable=W1201 logger.info(f"seq_len: {data.seq_len}") diff --git a/internlm/solver/optimizer/hybrid_zero_optim.py b/internlm/solver/optimizer/hybrid_zero_optim.py index 70c63a0..5031fd3 100644 --- a/internlm/solver/optimizer/hybrid_zero_optim.py +++ b/internlm/solver/optimizer/hybrid_zero_optim.py @@ -570,6 +570,7 @@ class HybridZeroOptimizer(BaseOptimizer): # check for overflow found_inf = False + found_nan = False # if there is INF values in grades, compute_norm func would also returns -1 # thus, we try to avoid call _check_overflow here # found_inf = self._check_overflow() @@ -578,9 +579,13 @@ class HybridZeroOptimizer(BaseOptimizer): if -1 in norms.values(): found_inf = True + if -2 in norms.values(): + found_nan = True + loss_scale = float(self.loss_scale.item()) # backup if gpc.config.model.dtype is not torch.float32: self.grad_scaler.update(found_inf) + # update loss scale if overflow occurs if found_inf: if gpc.is_rank_for_log(): @@ -593,6 +598,17 @@ class HybridZeroOptimizer(BaseOptimizer): self.zero_grad() return False, norms + if found_nan: + if gpc.is_rank_for_log(): + logger.warning("Nan grad norm occurs, please check it.") + send_alert_message( + address=gpc.config.monitor.alert.feishu_alert_address, + message="Nan grad norm occurs, please check it.", + ) + self._grad_store._averaged_gradients = dict() + self.zero_grad() + return False, norms + # copy the grad of fp16 param to fp32 param single_grad_partition_groups = [] for group_id in range(self.num_param_groups): diff --git a/internlm/solver/optimizer/utils.py b/internlm/solver/optimizer/utils.py index 63c8c25..dbfcc34 100644 --- a/internlm/solver/optimizer/utils.py +++ b/internlm/solver/optimizer/utils.py @@ -311,6 +311,9 @@ def compute_norm(gradients, parameters, last_stage=False, previous_norm=None, no if total_norm == float("inf") or total_norm == -float("inf"): total_norm = -1 + if math.isnan(total_norm): + total_norm = -2 + return total_norm diff --git a/internlm/train/training_internlm.py b/internlm/train/training_internlm.py index 15b019e..a24317e 100644 --- a/internlm/train/training_internlm.py +++ b/internlm/train/training_internlm.py @@ -354,6 +354,7 @@ def record_current_batch_training_metrics( set_env_var(key="LAST_ACTIVE_TIMESTAMP", value=int(time.time())) + timer.store_last_timers() if success_update in (0, True): train_state.num_consumed_tokens += batch[1].nelement() * gpc.get_world_size(ParallelMode.DATA) if is_no_pp_or_last_stage(): diff --git a/internlm/utils/gputest.py b/internlm/utils/gputest.py index 27ae9bd..ddb4932 100644 --- a/internlm/utils/gputest.py +++ b/internlm/utils/gputest.py @@ -9,7 +9,9 @@ import torch.distributed as dist from flash_attn.modules.mha import FlashSelfAttention, SelfAttention from torch.utils import benchmark +from internlm.monitor import send_alert_message from internlm.utils.logger import get_logger +from internlm.utils.megatron_timers import megatron_timer as timer try: import GPUtil @@ -24,6 +26,23 @@ from internlm.utils.common import get_current_device logger = get_logger(__file__) +def empty_cache_and_diag(batch_count, interval=50): + """empty cuda cache and run diag bench or tests.""" + if interval <= 0: + interval = 50 + if batch_count % int(interval) == 0: + # there is no need to do diag on the first batch + if batch_count > 0: + if gpc.is_rank_for_log(): + logger.info("Empty 
Cache and Diagnosis GPU/NCCL/Timer ...") + with torch.no_grad(): + timer_diagnosis() + bench_gpu() + bench_net() + # do empty_cache after the bench + torch.cuda.empty_cache() + + def benchmark_forward( test_fn, *inputs, @@ -81,14 +100,78 @@ def get_cpu_temperature(): return cpu_temperature +def timer_diagnosis(): + """Diagnosis running time""" + + if len(timer.names) == 0 or len(timer.times) == 0: + return + + world_size = gpc.get_world_size(ParallelMode.DATA) + if world_size < 2: + return + + # if gpc.is_rank_for_log(): + # logger.info("Diagnosis running timers ...") + + # detect slow rank compared to other ranks in the same DP group + running_time = torch.Tensor(timer.times).to(device=get_current_device()) + avg_time = running_time.detach().clone() + if world_size <= 4: + dist.all_reduce(avg_time, op=torch.distributed.ReduceOp.AVG, group=gpc.get_group(ParallelMode.DATA)) + else: + running_time_max = avg_time.detach().clone() + running_time_min = avg_time.detach().clone() + dist.all_reduce(running_time_max, op=torch.distributed.ReduceOp.MAX, group=gpc.get_group(ParallelMode.DATA)) + dist.all_reduce(running_time_min, op=torch.distributed.ReduceOp.MIN, group=gpc.get_group(ParallelMode.DATA)) + dist.all_reduce(avg_time, op=torch.distributed.ReduceOp.SUM, group=gpc.get_group(ParallelMode.DATA)) + avg_time = (avg_time - running_time_max - running_time_min) / (world_size - 2) + + diag_result = running_time > avg_time * gpc.config.data.diag_outlier_ratio + diag_result = diag_result.tolist() + avg_time = avg_time.tolist() + + for slow, name, time, avg in zip(diag_result, timer.names, timer.times, avg_time): + if slow is False or avg < 0.5: + continue + msg = ( + f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} is slower than avg on {name}, " + f"Hostname {socket.gethostname()}, " + f"its time {time:.2f}, avg {avg:.2f}, " + f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" + ) + logger.warning(msg) + send_alert_message( + address=gpc.config.monitor.alert.feishu_alert_address, + message=msg, + ) + + # detect slow rank compared to historical timer data + for name, time in zip(timer.names, timer.times): + if name not in timer.hist or len(timer.hist[name]) < 5: + continue + hist_avg = sum(timer.hist[name]) / len(timer.hist[name]) + if time > hist_avg * gpc.config.data.diag_outlier_ratio and time > 0.5: + msg = ( + f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} is slower than hist avg on {name}, " + f"Hostname {socket.gethostname()}, " + f"its time {time:.2f}, hist_avg {hist_avg:.2f}, " + f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" + ) + logger.warning(msg) + send_alert_message( + address=gpc.config.monitor.alert.feishu_alert_address, + message=msg, + ) + + def bench_net(): """Benchmark nccl performance for slow node detection.""" if gpc.get_world_size(ParallelMode.GLOBAL) <= 1: return - if gpc.is_rank_for_log(): - logger.info("benchmarking network speed ...") + # if gpc.is_rank_for_log(): + # logger.info("benchmarking network speed ...") repeats = 100 input_data = torch.randn( @@ -113,20 +196,25 @@ def bench_net(): allreduce_time_avg = allreduce_time / gpc.get_world_size(ParallelMode.GLOBAL) allreduce_time_avg = float(allreduce_time_avg.item()) - if allreduce_time_this >= allreduce_time_avg * 1.05: - logger.warning( + if allreduce_time_this >= allreduce_time_avg * gpc.config.data.diag_outlier_ratio: + msg = ( f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} NCCL test is slower than avg, " f"Hostname {socket.gethostname()}, " f"allreduce_time 
{allreduce_time_this:.2f}, avg {allreduce_time_avg:.2f}, " f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" ) + logger.warning(msg) + send_alert_message( + address=gpc.config.monitor.alert.feishu_alert_address, + message=msg, + ) def bench_gpu(use_flash_attn=True): """Benchmark single GPU performance for slow node detection.""" - if gpc.is_rank_for_log(): - logger.info("benchmarking gpu speed ...") + # if gpc.is_rank_for_log(): + # logger.info("benchmarking gpu speed ...") headdim = 64 dim = 2048 @@ -154,10 +242,15 @@ def bench_gpu(use_flash_attn=True): speed_avg = speed / gpc.get_world_size(ParallelMode.GLOBAL) speed_avg = float(speed_avg.item()) - if speed_this <= speed_avg * 0.95: - logger.warning( + if speed_this <= speed_avg / gpc.config.data.diag_outlier_ratio: + msg = ( f"Rank {gpc.get_local_rank(ParallelMode.GLOBAL)} GPU is slower than avg, " f"Hostname {socket.gethostname()}, " f"tflops {speed_this:.2f}, avg {speed_avg:.2f}, " f"CPU temp {get_cpu_temperature()}, GPU temp { get_gpu_temperature()}" ) + logger.warning(msg) + send_alert_message( + address=gpc.config.monitor.alert.feishu_alert_address, + message=msg, + ) diff --git a/internlm/utils/megatron_timers.py b/internlm/utils/megatron_timers.py index e319a80..d5d89e5 100644 --- a/internlm/utils/megatron_timers.py +++ b/internlm/utils/megatron_timers.py @@ -16,8 +16,12 @@ class _Timer: self.start_time = time.time() self.stream = torch.cuda.current_stream() - def start(self): + def start(self, reset_all=True): """Start the timer.""" + # need to reset all timers in a new batch + if self.name_ == "one-batch" and reset_all is True: + megatron_timer.reset() + assert not self.started_, "timer has already been started" self.stream.synchronize() self.start_time = time.time() @@ -48,7 +52,7 @@ class _Timer: self.reset() # If timing was in progress, set it back. 
if started_: - self.start() + self.start(reset_all=False) return elapsed_ @@ -57,12 +61,29 @@ class Timers: def __init__(self): self.timers = {} + self.hist = {} + self.names = [] + self.times = [] def __call__(self, name): if name not in self.timers: self.timers[name] = _Timer(name) return self.timers[name] + def store_last_timers(self): + """Store timers to two list""" + self.names = [] + self.times = [] + for key, value in self.timers.items(): + senconds = round(float(value.elapsed(reset=False)), 4) + self.names.append(key) + self.times.append(senconds) + if key not in self.hist: + self.hist[key] = [] + self.hist[key].append(senconds) + if len(self.hist[key]) > 10: + self.hist[key].pop(0) + def write(self, names, writer, iteration, normalizer=1.0, reset=False): """Write timers to a tensorboard writer""" # currently when using add_scalars, diff --git a/train.py b/train.py index b9fe6af..ff15354 100644 --- a/train.py +++ b/train.py @@ -35,6 +35,7 @@ from internlm.utils.common import ( parse_args, ) from internlm.utils.evaluation import evaluate_on_val_dls +from internlm.utils.gputest import empty_cache_and_diag from internlm.utils.logger import get_logger, initialize_uniscale_logger from internlm.utils.megatron_timers import megatron_timer as timer from internlm.utils.model_checkpoint import CheckpointManager @@ -193,9 +194,7 @@ def main(args): with initialize_llm_profile(profiling=args.profiling, start_time=current_time) as prof: # start iterating the train data and begin training for batch_count in range(train_state.batch_count, total_steps): - if batch_count % 50 == 0: - torch.cuda.empty_cache() - + empty_cache_and_diag(batch_count, interval=gpc.config.data.empty_cache_and_diag_interval) start_time = time.time() timer("one-batch").start() From 85e39aae6782ac0c48e4e27220be681949e4785a Mon Sep 17 00:00:00 2001 From: Guoteng <32697156+SolenoidWGT@users.noreply.github.com> Date: Fri, 8 Sep 2023 20:41:53 +0800 Subject: [PATCH 33/34] fix(ckpt): fix snapshot none load error and remove file lock (#298) --- internlm/utils/model_checkpoint.py | 85 +++++---- tests/test_utils/common_fixture.py | 4 +- tests/test_utils/test_model_checkpoint.py | 221 ++++++++++++++++------ 3 files changed, 218 insertions(+), 92 deletions(-) diff --git a/internlm/utils/model_checkpoint.py b/internlm/utils/model_checkpoint.py index b8f7ad6..b6aab02 100644 --- a/internlm/utils/model_checkpoint.py +++ b/internlm/utils/model_checkpoint.py @@ -2,7 +2,6 @@ # -*- encoding: utf-8 -*- import copy -import fcntl import inspect import os import socket @@ -545,12 +544,17 @@ class CheckpointManager: if self.stop_file_path is None: return now_break, now_save_ckpt, save_type - with open(self.stop_file_path, "a+", encoding="utf-8") as f: - fcntl.flock(f, fcntl.LOCK_EX) - f.seek(0) - msg = f.read() - fcntl.flock(f, fcntl.LOCK_UN) - action_step = int(msg) + with torch.no_grad(): + action_step_t = torch.zeros((1,), dtype=torch.int64).cuda() + if gpc.get_global_rank() == 0: + with open(self.stop_file_path, "r+", encoding="utf-8") as f: + f.seek(0) + msg = f.read() + action_step_t.fill_(int(msg)) + + torch.distributed.broadcast(action_step_t, src=0) + action_step = action_step_t.item() + del action_step_t if action_step < 0 and abs(action_step) == train_state.step_count: now_save_ckpt = True @@ -627,41 +631,50 @@ now step_count is {train_state.step_count}", return None, None max_normal_step = 0 - ckpt_list = list(map(lambda a: int(a.strip("/")) if a.strip("/").isdigit() else 0, ckpt_list)) - ckpt_list.sort(reverse=True) - for ckpt in 
ckpt_list: - fns_list = self.storage_manager.get_fns(os.path.join(self.save_ckpt_folder, str(ckpt))) - for fn in fns_list: - if fn.endswith(".step"): - max_normal_step = ckpt + # Return ckpt_list look like: ['pings', 'snapshot', '4'] + # Here we only try to find the ckpt folder named after step, ignoring snapshot and other folders. + ckpt_list = [int(fn.strip("/")) for fn in ckpt_list if fn.strip("/").isdigit()] + if len(ckpt_list) == 0: + logger.warning("Not found avaliable normal checkpoint!") + else: + logger.info(f"Found avaliable normal checkpoint: {ckpt_list}!") + ckpt_list.sort(reverse=True) + for ckpt in ckpt_list: + fns_list = self.storage_manager.get_fns(os.path.join(self.save_ckpt_folder, str(ckpt))) + for fn in fns_list: + if fn.endswith(".step"): + max_normal_step = ckpt + break + if max_normal_step != 0: break - if max_normal_step != 0: - break - max_normal_step = ckpt_list[0] - load_normal_ckpt_path = os.path.join(self.save_ckpt_folder, str(max_normal_step)) + max_normal_step = ckpt_list[0] + load_normal_ckpt_path = os.path.join(self.save_ckpt_folder, str(max_normal_step)) snapshot_path_0 = os.path.join(self.save_ckpt_folder, "snapshot", "0") snapshot_path_1 = os.path.join(self.save_ckpt_folder, "snapshot", "1") - ckpt_list_1 = self.storage_manager.get_fns(snapshot_path_0) - ckpt_list_2 = self.storage_manager.get_fns(snapshot_path_1) - max_step_0, max_step_1 = 0, 0 - if ckpt_list_1: - for ckpt in ckpt_list_1: - ckpt = ckpt.strip("/") - if ckpt.endswith(".step"): - max_step_0 = max(max_step_0, int(ckpt.split(".")[0])) - if ckpt_list_2: - for ckpt in ckpt_list_2: - ckpt = ckpt.strip("/") - if ckpt.endswith(".step"): - max_step_1 = max(max_step_1, int(ckpt.split(".")[0])) + ckpt_list_0 = self.storage_manager.get_fns(snapshot_path_0) + ckpt_list_1 = self.storage_manager.get_fns(snapshot_path_1) - snap_load_path = snapshot_path_0 if max_step_0 > max_step_1 else snapshot_path_1 - snap_step = max(max_step_0, max_step_1) - load_path = snap_load_path if snap_step > max_normal_step else load_normal_ckpt_path - load_step = max(snap_step, max_normal_step) - return load_path, load_step + def found_latest_snapshot(_ckpt_list): + _max_step_snapshot = 0 + if _ckpt_list: + for ckpt in _ckpt_list: + ckpt = ckpt.strip("/") + if ckpt.endswith(".step"): + _max_step_snapshot = max(_max_step_snapshot, int(ckpt.split(".")[0])) + return _max_step_snapshot + + max_step_0 = found_latest_snapshot(ckpt_list_0) + max_step_1 = found_latest_snapshot(ckpt_list_1) + + if sum([max_step_0, max_step_1, max_normal_step]) == 0: + return None, None + else: + snap_load_path = snapshot_path_0 if max_step_0 > max_step_1 else snapshot_path_1 + snap_step = max(max_step_0, max_step_1) + load_path = snap_load_path if snap_step > max_normal_step else load_normal_ckpt_path + return load_path, max(snap_step, max_normal_step) def query_latest_snapshot_step_local(self): max_step, max_step_path = 0, None diff --git a/tests/test_utils/common_fixture.py b/tests/test_utils/common_fixture.py index d6a19b6..80cb353 100644 --- a/tests/test_utils/common_fixture.py +++ b/tests/test_utils/common_fixture.py @@ -50,6 +50,8 @@ init_config = Config( ), resume_tb_folder="", tensorboard_folder="", + alert_address=None, + monitor=dict(alert=dict(enable_feishu_alert=False, feishu_alert_address=None, light_monitor_address=None)), ) ) @@ -177,5 +179,5 @@ def del_tmp_file(): results += str(line.rstrip()) presults += line.rstrip().decode() + "\n" print(presults, flush=True) - except FileNotFoundError: + except: # noqa # pylint: disable=bare-except 
pass diff --git a/tests/test_utils/test_model_checkpoint.py b/tests/test_utils/test_model_checkpoint.py index bd93436..956880b 100644 --- a/tests/test_utils/test_model_checkpoint.py +++ b/tests/test_utils/test_model_checkpoint.py @@ -1,9 +1,10 @@ import os +from functools import partial import pytest import torch +import torch.distributed as dist -from internlm.core.context import global_context as gpc from internlm.core.context.parallel_context import Config from internlm.core.trainer import TrainState from internlm.solver.optimizer.hybrid_zero_optim import HybridZeroOptimizer @@ -15,27 +16,24 @@ from tests.test_utils.common_fixture import ( # noqa # pylint: disable=unused-i BOTO_SAVE_PATH, LOCAL_SAVE_PATH, del_tmp_file, + init_config, init_dist_and_model, reset_singletons, ) -TOTAL_STEP = 6 - -CKPT_EVERY = 4 -SNPASHOT_EVERY = 2 - - +# (TOTAL_STEP, CKPT_EVERY, SNPASHOT_EVERY) +step_info_list = [(8, 4, 2), (3, 4, 2), (1, 6, 3)] ckpt_config_list = [ # Old interface format dict( enable_save_ckpt=True, save_ckpt_folder=BOTO_SAVE_PATH, load_optimizer=True, - checkpoint_every=CKPT_EVERY, + checkpoint_every=0, async_upload=True, async_upload_tmp_folder=ASYNC_TMP_FOLDER, snapshot_ckpt_folder="/".join([BOTO_SAVE_PATH, "snapshot"]), - oss_snapshot_freq=SNPASHOT_EVERY, + oss_snapshot_freq=0, stop_file_path=None, load_model_only_folder=None, load_given_ckpt=False, @@ -47,11 +45,11 @@ ckpt_config_list = [ enable_save_ckpt=True, save_ckpt_folder=LOCAL_SAVE_PATH, load_optimizer=True, - checkpoint_every=CKPT_EVERY, + checkpoint_every=0, async_upload=False, async_upload_tmp_folder=ASYNC_TMP_FOLDER, snapshot_ckpt_folder="/".join([LOCAL_SAVE_PATH, "snapshot"]), - oss_snapshot_freq=SNPASHOT_EVERY, + oss_snapshot_freq=0, stop_file_path=None, load_model_only_folder=None, load_given_ckpt=False, @@ -62,10 +60,10 @@ ckpt_config_list = [ dict( enable_save_ckpt=True, save_ckpt_folder=BOTO_SAVE_PATH, - checkpoint_every=CKPT_EVERY, + checkpoint_every=0, async_upload=True, async_upload_tmp_folder=ASYNC_TMP_FOLDER, - oss_snapshot_freq=SNPASHOT_EVERY, + oss_snapshot_freq=0, stop_file_path=None, is_old_api=False, auto_resume=True, @@ -73,10 +71,10 @@ ckpt_config_list = [ dict( enable_save_ckpt=True, save_ckpt_folder=LOCAL_SAVE_PATH, - checkpoint_every=CKPT_EVERY, + checkpoint_every=0, async_upload=False, async_upload_tmp_folder=ASYNC_TMP_FOLDER, - oss_snapshot_freq=SNPASHOT_EVERY, + oss_snapshot_freq=0, stop_file_path=None, load_ckpt_folder=None, is_old_api=False, @@ -159,15 +157,63 @@ def del_tmp(): del_tmp_file() +def return_prefix_path(save_ckpt_folder): + if save_ckpt_folder.startswith("local:"): + return LOCAL_SAVE_PATH + else: + return BOTO_SAVE_PATH + + +def return_latest_save_path(save_ckpt_folder, total_step, snapshot_freq, ckpt_freq): + + snapshot_latest_step, normal_latest_step = 0, 0 + snapshot_latest_count, normal_latest_count = 0, 0 + + for i in range(total_step): + if (i + 1) % ckpt_freq == 0: + normal_latest_step = i + 1 + normal_latest_count += 1 + else: + if (i + 1) % snapshot_freq == 0: + snapshot_latest_step = i + 1 + snapshot_latest_count += 1 + + if snapshot_latest_step == 0: + return None, None + + if normal_latest_step >= snapshot_latest_step: + return normal_latest_step, os.path.join(return_prefix_path(save_ckpt_folder), f"{normal_latest_step}") + elif normal_latest_step < snapshot_latest_step: + if snapshot_latest_count % 2 == 0: + re_path = f"{return_prefix_path(save_ckpt_folder)}/snapshot/0" + else: + re_path = f"{return_prefix_path(save_ckpt_folder)}/snapshot/1" + return snapshot_latest_step, 
re_path + else: + assert False + + @pytest.mark.usefixtures("del_tmp") @pytest.mark.usefixtures("reset_singletons") +@pytest.mark.parametrize("step_info", step_info_list) @pytest.mark.parametrize("ckpt_config", ckpt_config_list) -def test_ckpt_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-import +def test_ckpt_mm(step_info, ckpt_config, init_dist_and_model): # noqa # pylint: disable=unused-import + from internlm.core.context import global_context as gpc from internlm.utils.model_checkpoint import CheckpointLoadMask, CheckpointLoadType ckpt_config = Config(ckpt_config) - assert ckpt_config.checkpoint_every < TOTAL_STEP - assert ckpt_config.oss_snapshot_freq < TOTAL_STEP + total_step, checkpoint_every, oss_snapshot_freq = step_info + print(total_step, checkpoint_every, oss_snapshot_freq, flush=True) + ckpt_config.checkpoint_every = checkpoint_every + ckpt_config.oss_snapshot_freq = oss_snapshot_freq + + bond_return_latest_save_path = partial( + return_latest_save_path, + ckpt_config.save_ckpt_folder, + total_step, + ckpt_config.oss_snapshot_freq, + ckpt_config.checkpoint_every, + ) model, opim = init_dist_and_model train_state = TrainState(gpc.config, None) @@ -178,7 +224,7 @@ def test_ckpt_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=un ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim) latest_ckpt_step = None - for i in range(TOTAL_STEP + 1): + for i in range(total_step): overwrite_model_value(model, i) overwrite_optim_state(opim, i) @@ -193,54 +239,119 @@ def test_ckpt_mm(ckpt_config, init_dist_and_model): # noqa # pylint: disable=un wait_async_upload_finish() latest_ckpt_info = ckpt_mm.query_lastest_ckpt() - assert latest_ckpt_info is not None - latest_ckpt = latest_ckpt_info["path"] - if ckpt_mm.save_ckpt_folder.startswith("local"): - assert latest_ckpt == "local:local_ckpt/snapshot/0", latest_ckpt + step, path = bond_return_latest_save_path() + assert latest_ckpt_info["path"] == path + if latest_ckpt_step is None: + assert latest_ckpt_step == step else: - assert latest_ckpt == f"{BOTO_SAVE_PATH}/snapshot/0", latest_ckpt + assert latest_ckpt_step == step - 1 + # resume from before save skpt del ckpt_mm SingletonMeta._instances = {} ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim) ckpt_mm.try_resume_training(train_state) - assert latest_ckpt_step == 5 - assert train_state.step_count == 6 - assert train_state.batch_count == 6 - assert compare_optim_value(ckpt_mm.optimizer, latest_ckpt_step), ckpt_mm.optimizer.param_groups[0]["params"][0] - assert compare_model_value(ckpt_mm.model, latest_ckpt_step), list(ckpt_mm.model.parameters())[0][0] - if ckpt_mm.save_ckpt_folder.startswith("local:"): - ckpt_mm.load_ckpt_info = dict( - path=os.path.join(LOCAL_SAVE_PATH, "4"), - content=CheckpointLoadMask(("all",)), - ckpt_type=CheckpointLoadType.INTERNLM, - ) + if ckpt_config.checkpoint_every < total_step: + # we use step_count to decide when save ckpt, os here latest_ckpt_step = step_count - 1 + assert train_state.step_count == latest_ckpt_step + 1 + assert train_state.batch_count == latest_ckpt_step + 1 + assert compare_optim_value(ckpt_mm.optimizer, latest_ckpt_step), ckpt_mm.optimizer.param_groups[0]["params"][0] + assert compare_model_value(ckpt_mm.model, latest_ckpt_step), list(ckpt_mm.model.parameters())[0][0] + + if ckpt_mm.save_ckpt_folder.startswith("local:"): + ckpt_mm.load_ckpt_info = dict( + path=os.path.join(LOCAL_SAVE_PATH, f"{ckpt_config.checkpoint_every}"), + content=CheckpointLoadMask(("all",)), + 
+                ckpt_type=CheckpointLoadType.INTERNLM,
+            )
+        else:
+            ckpt_mm.load_ckpt_info = dict(
+                path=os.path.join(BOTO_SAVE_PATH, f"{ckpt_config.checkpoint_every}"),
+                content=CheckpointLoadMask(("all",)),
+                ckpt_type=CheckpointLoadType.INTERNLM,
+            )
+
+        ckpt_mm.try_resume_training(train_state)
+
+        assert train_state.step_count == ckpt_config.checkpoint_every
+        assert train_state.batch_count == ckpt_config.checkpoint_every
+        # restored values should match the step at which the ckpt was saved, i.e. checkpoint_every - 1
+        assert compare_optim_value(ckpt_mm.optimizer, ckpt_config.checkpoint_every - 1), ckpt_mm.optimizer.param_groups[
+            0
+        ]["params"][0]
+        assert compare_model_value(ckpt_mm.model, ckpt_config.checkpoint_every - 1), list(ckpt_mm.model.parameters())[
+            0
+        ][0]
     else:
-        ckpt_mm.load_ckpt_info = dict(
-            path=os.path.join(BOTO_SAVE_PATH, "4"),
-            content=CheckpointLoadMask(("all",)),
-            ckpt_type=CheckpointLoadType.INTERNLM,
+        pass
+
+
+STOP_FILE_PATH = "./alter.log"
+
+
+def query_quit_file(rank, world_size=2):
+    from internlm.core.context import global_context as gpc
+    from internlm.initialize import initialize_distributed_env
+    from internlm.utils.model_checkpoint import CheckpointSaveType
+
+    ckpt_config = Config(
+        dict(
+            enable_save_ckpt=True,
+            save_ckpt_folder=BOTO_SAVE_PATH,
+            load_optimizer=True,
+            checkpoint_every=0,
+            async_upload=True,
+            async_upload_tmp_folder=ASYNC_TMP_FOLDER,
+            snapshot_ckpt_folder="/".join([BOTO_SAVE_PATH, "snapshot"]),
+            oss_snapshot_freq=0,
+            stop_file_path=STOP_FILE_PATH,
+            load_model_only_folder=None,
+            load_given_ckpt=False,
+            load_ckpt_folder=None,
+            is_old_api=True,
+        ),
+    )
+
+    os.environ["RANK"] = str(rank)
+    os.environ["LOCAL_RANK"] = str(rank)
+    os.environ["WORLD_SIZE"] = str(world_size)
+    os.environ["MASTER_ADDR"] = "127.0.0.1"
+    os.environ["MASTER_PORT"] = "12376"
+
+    initialize_distributed_env(config=init_config, launcher="torch", master_port=12376, args_check=False)
+    train_state = TrainState(init_config, None)
+    ckpt_mm = CheckpointManager(ckpt_config, model=None, optimizer=None)
+    if rank == 0:
+        with open(STOP_FILE_PATH, "w+") as f:
+            f.write("5")
+    dist.barrier()
+    for i in range(10):
+        train_state.step_count = i
+        now_break, now_save_ckpt, save_type = ckpt_mm.quit_signal_handler(train_state)
+        print(
+            f"step:{i}, rank:{rank}, now_break:{now_break}, now_save_ckpt:{now_save_ckpt}, save_type:{save_type}",
+            flush=True,
         )
-
-    ckpt_mm.try_resume_training(train_state)
-
-    assert train_state.step_count == 4
-    assert train_state.batch_count == 4
-    assert compare_optim_value(ckpt_mm.optimizer, 3), ckpt_mm.optimizer.param_groups[0]["params"][0]
-    assert compare_model_value(ckpt_mm.model, 3), list(ckpt_mm.model.parameters())[0][0]
+        if train_state.step_count == 5:
+            assert now_break is True
+            assert now_save_ckpt is True
+            assert save_type is CheckpointSaveType.NORMAL_CHECKPOINT
+    dist.barrier()
+    gpc.destroy()
 
 
-@pytest.mark.usefixtures("del_tmp")
-@pytest.mark.usefixtures("reset_singletons")
-@pytest.mark.parametrize("ckpt_config", ckpt_config_list)
-def test_ckpt_mm_ping(ckpt_config, init_dist_and_model):  # noqa  # pylint: disable=unused-import
-    ckpt_config = Config(ckpt_config)
+def test_quit_signal_handler():  # noqa  # pylint: disable=unused-import
+    import multiprocessing
+    from multiprocessing.pool import Pool
 
-    model, opim = init_dist_and_model
-    SingletonMeta._instances = {}
-    ckpt_mm = CheckpointManager(ckpt_config, model=model, optimizer=opim)
-    ckpt_mm.try_ping_storage()
+    world_size = 2
+    with Pool(processes=world_size, context=multiprocessing.get_context("spawn")) as pool:
+        items = [(0,), (1,)]
+        for result in pool.starmap(query_quit_file, items):
+            print(f"Got result: {result}", flush=True)
+
+    os.remove(STOP_FILE_PATH)
 
 
 if __name__ == "__main__":

From 717f0c9e64f6002a61b48a7f091cca077ae1325a Mon Sep 17 00:00:00 2001
From: Wenwen Qu
Date: Wed, 13 Sep 2023 13:00:49 +0800
Subject: [PATCH 34/34] change float16 to bfloat16

---
 configs/7B_sft.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/configs/7B_sft.py b/configs/7B_sft.py
index e0b9a8a..7f44533 100644
--- a/configs/7B_sft.py
+++ b/configs/7B_sft.py
@@ -128,7 +128,7 @@ model = dict(
     num_layers=NUM_LAYER,
     mlp_ratio=MLP_RATIO,
     apply_post_layer_norm=False,
-    dtype="torch.float16",  # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32"
+    dtype="torch.bfloat16",  # Support: "torch.float16", "torch.half", "torch.bfloat16", "torch.float32", "torch.tf32"
     norm_type="rmsnorm",
     layer_norm_epsilon=1e-5,
     use_flash_attn=True,