mirror of
https://github.com/tloen/alpaca-lora.git
synced 2024-10-01 01:05:56 -04:00
a452dfcc18
* Added Dockerfile for inference * Added instructions for Dockerfile * Update README.md * Update README.md * Update README.md * Pass env through Dockerfile * Added docker compose setup and instructions * Added more environment options * Set a safer default mount point * add docker-compose changes * Added Dockerfile for inference * Added instructions for Dockerfile * Update README.md * Update README.md * Update README.md * Pass env through Dockerfile * Added docker compose setup and instructions * Added more environment options * Set a safer default mount point * add to gitignore, update to new generate.py * add docker ignore, simplify docker compose file * add back missing requirements * Adjustments to compose and generate.py, added Docker to README.md * Linting adjust to Black * Adjusting import linting * Update README.md * Update README.md * Removed comment by original Dockerfile creator. Comment not necessary. * cleanup README Co-authored-by: Francesco Saverio Zuppichini <zuppif@usi.ch> --------- Co-authored-by: Francesco Saverio Zuppichini <zuppif@usi.ch> Co-authored-by: Chris Alexiuk <c.s.alexiuk@gmail.com> Co-authored-by: ElRoberto538 <> Co-authored-by: Sam Sipe <samsipe@gmail.com> Co-authored-by: Eric J. Wang <eric.james.wang@gmail.com>
29 lines
643 B
YAML
29 lines
643 B
YAML
---
# Docker Compose service for alpaca-lora inference (Gradio UI on port 7860).
# Reconstructed with canonical 2-space indentation; gutter artifacts removed.
version: '3'

services:
  alpaca-lora:
    build:
      context: ./
      dockerfile: Dockerfile
      args:
        # Quoted so Compose receives the string "0", not an integer.
        BUILDKIT_INLINE_CACHE: "0"
    image: alpaca-lora
    # Large shared-memory segment for the model runtime.
    shm_size: '64gb'
    # BASE_MODEL is expected in the environment at `docker compose up` time.
    command: generate.py --load_8bit --base_model $BASE_MODEL --lora_weights 'tloen/alpaca-lora-7b'
    restart: unless-stopped
    volumes:
      - alpaca-lora:/root/.cache  # Location downloaded weights will be stored
    ports:
      # Quoted HOST:CONTAINER — bare digit:digit scalars hit YAML 1.1
      # sexagesimal parsing on some loaders; quoting is the safe form.
      - "7860:7860"
    deploy:
      resources:
        reservations:
          devices:
            # Reserve all available NVIDIA GPUs for this service.
            - driver: nvidia
              count: all
              capabilities: [gpu]

volumes:
  # Named volume persists downloaded weights across container recreation.
  alpaca-lora:
    name: alpaca-lora