Initial commit
This commit is contained in:
commit
a76df9513d
7 changed files with 411 additions and 0 deletions
65
.github/workflows/build-image.yaml
vendored
Normal file
65
.github/workflows/build-image.yaml
vendored
Normal file
|
@ -0,0 +1,65 @@
|
|||
name: build-image

# Build and publish the multi-arch Docker image on pushes to main/dev
# and on version-like tags.
on:
  push:
    branches:
      - main
      - dev
    tags:
      - '[0-9]+.*'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      # Derive the image name from the repository name by stripping a
      # leading "docker-" prefix (e.g. owner/docker-foo -> owner/foo).
      - name: Convert repository name to image name
        id: image_name
        run: |
          sed -E -e 's/docker-//' -e 's/^/image_name=/' <<<"${{ github.repository }}" >> "$GITHUB_OUTPUT"

      - name: Generate Docker tag names
        id: meta
        uses: docker/metadata-action@v5
        with:
          # list of Docker images to use as base name for tags
          images: |
            ghcr.io/${{ steps.image_name.outputs.image_name }}
            ${{ steps.image_name.outputs.image_name }}
          # generate Docker tags based on the following events/attributes
          tags: |
            # set latest tag for default branch
            type=raw,value=latest,enable={{is_default_branch}}
            # set edge tag for dev branch
            type=edge,enable=true,branch=dev
            # Tags for non SemVer tag names
            type=match,pattern=([0-9]+.*),group=1

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Login to GHCR
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.REGISTRY_TOKEN }}

      - name: Login to Docker Hub
        uses: docker/login-action@v3
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: linux/arm/v7,linux/arm64/v8,linux/amd64
          push: true
          tags: ${{ steps.meta.outputs.tags }}
|
11
Dockerfile
Normal file
11
Dockerfile
Normal file
|
@ -0,0 +1,11 @@
|
|||
FROM python:3.12-alpine
LABEL maintainer="Salvoxia <salvoxia@blindfish.info>"

# Copy the album-creation script, its Python requirements and the
# cron/wrapper helper scripts into the image.
COPY immich_auto_album.py requirements.txt docker/immich_auto_album.sh docker/setup_cron.sh /script/

# Install the Python dependencies, make the helper scripts executable,
# and clean up caches/temp files to keep the image small.
RUN pip install --no-cache-dir -r /script/requirements.txt \
    && chmod +x /script/setup_cron.sh /script/immich_auto_album.sh \
    && rm -rf /tmp/* /var/tmp/* /var/cache/apk/* /var/cache/distfiles/*

WORKDIR /script
# Register the cron job (if CRON_EXPRESSION is set) and run cron in the
# foreground so the container stays alive.
CMD ["sh", "-c", "/script/setup_cron.sh && crond -f"]
|
144
README.md
Normal file
144
README.md
Normal file
|
@ -0,0 +1,144 @@
|
|||
# Immich Folder Album Creator
|
||||
|
||||
This is a python script designed to automatically create albums in [Immich](https://immich.app/) from a folder structure mounted into the Immich container.
|
||||
This is useful for automatically creating and populating albums for external libraries.
|
||||
|
||||
__Current compatibility:__ Immich v1.95.x and below
|
||||
|
||||
## Disclaimer
|
||||
This script is mostly based on the following original script: [REDVM/immich_auto_album.py](https://gist.github.com/REDVM/d8b3830b2802db881f5b59033cf35702)
|
||||
|
||||
## Installation
|
||||
|
||||
### Bare Python Script
|
||||
1. Download the script and its requirements
|
||||
```bash
|
||||
curl https://raw.githubusercontent.com/Salvoxia/immich-folder-album-creator/main/immich_auto_album.py -o immich_auto_album.py
|
||||
curl https://raw.githubusercontent.com/Salvoxia/immich-folder-album-creator/main/requirements.txt -o requirements.txt
|
||||
```
|
||||
2. Install requirements
|
||||
```bash
|
||||
pip3 install -r requirements.txt
|
||||
```
|
||||
3. Run the script
|
||||
```bash
|
||||
python3 ./immich_auto_album.py
|
||||
usage: immich_auto_album.py [-h] [-u] [-c CHUNK_SIZE] [-C FETCH_CHUNK_SIZE] [-l {CRITICAL,ERROR,WARNING,INFO,DEBUG}] root_path api_url api_key
|
||||
|
||||
Create Immich Albums from an external library path based on the top level folders
|
||||
|
||||
positional arguments:
|
||||
root_path            The external library's root path in Immich
|
||||
api_url The root API URL of immich, e.g. https://immich.mydomain.com/api/
|
||||
api_key The Immich API Key to use
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
-u, --unattended Do not ask for user confirmation after identifying albums. Set this flag to run script as a cronjob. (default: False)
|
||||
-c CHUNK_SIZE, --chunk-size CHUNK_SIZE
|
||||
Maximum number of assets to add to an album with a single API call (default: 2000)
|
||||
-C FETCH_CHUNK_SIZE, --fetch-chunk-size FETCH_CHUNK_SIZE
|
||||
Maximum number of assets to fetch with a single API call (default: 5000)
|
||||
-l {CRITICAL,ERROR,WARNING,INFO,DEBUG}, --log-level {CRITICAL,ERROR,WARNING,INFO,DEBUG}
|
||||
Log level to use (default: INFO)
|
||||
```
|
||||
|
||||
### Docker
|
||||
|
||||
A Docker image is provided to be used as a runtime environment. It can be used to either run the script manually, or via cronjob by providing a crontab expression to the container. The container can then be added to the Immich compose stack directly.
|
||||
|
||||
#### Environment Variables
|
||||
The environment variables are analogous to the script's command line arguments.
|
||||
|
||||
| Environment variable | Mandatory? | Description |
|
||||
| :------------------- | :----------- | :------------ |
|
||||
| ROOT_PATH | yes | The external library's root path in Immich |
|
||||
| API_URL | yes | The root API URL of immich, e.g. https://immich.mydomain.com/api/ |
|
||||
| API_KEY | yes | The Immich API Key to use |
|
||||
| CHUNK_SIZE | no | Maximum number of assets to add to an album with a single API call (default: 2000) |
|
||||
| FETCH_CHUNK_SIZE | no | Maximum number of assets to fetch with a single API call (default: 5000) |
|
||||
| LOG_LEVEL | no | Log level to use (default: INFO), allowed values: CRITICAL,ERROR,WARNING,INFO,DEBUG |
|
||||
|
||||
#### Run the container with Docker
|
||||
|
||||
To perform a manually triggered run, use the following command:
|
||||
|
||||
```bash
|
||||
docker run --env API_URL="https://immich.mydomain.com/api/" \
|
||||
--env API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
|
||||
--env ROOT_PATH="/external_libs/photos" \
|
||||
salvoxia/immich-folder-album-creator:latest \
|
||||
/script/immich_auto_album.sh
|
||||
```
|
||||
|
||||
To set up the container to periodically run the script, give it a name, pass the TZ variable and a valid crontab expression as environment variable. This example runs the script every hour:
|
||||
```bash
|
||||
docker run --name immich-folder-album-creator \
|
||||
--env TZ="Europe/Berlin" \
|
||||
--env CRON_EXPRESSION="0 * * * *" \
|
||||
--env API_URL="https://immich.mydomain.com/api/" \
|
||||
--env API_KEY="xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" \
|
||||
--env ROOT_PATH="/external_libs/photos" \
|
||||
salvoxia/immich-folder-album-creator:latest
|
||||
```
|
||||
|
||||
### Run the container with Docker-Compose
|
||||
|
||||
Adding the container to Immich's `docker-compose.yml` file:
|
||||
```yml
|
||||
version: "3.8"
|
||||
|
||||
#
|
||||
# WARNING: Make sure to use the docker-compose.yml of the current release:
|
||||
#
|
||||
# https://github.com/immich-app/immich/releases/latest/download/docker-compose.yml
|
||||
#
|
||||
# The compose file on main may not be compatible with the latest release.
|
||||
#
|
||||
|
||||
name: immich
|
||||
|
||||
services:
|
||||
immich-server:
|
||||
container_name: immich_server
|
||||
volumes:
|
||||
- /path/to/my/photos:/external_libs/photos
|
||||
...
|
||||
immich-folder-album-creator:
|
||||
container_name: immich_folder_album_creator
|
||||
image: salvoxia/immich-folder-album-creator:latest
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
API_URL: http://immich_server:3001/api
|
||||
API_KEY: xxxxxxxxxxxxxxxxx
|
||||
ROOT_PATH: /external_libs/photos
|
||||
CRON_EXPRESSION: "0 * * * *"
|
||||
TZ: Europe/Berlin
|
||||
|
||||
```
|
||||
|
||||
## How it works
|
||||
|
||||
The script utilizes [Immich's REST API](https://immich.app/docs/api/) to query all images indexed by Immich, extract the folder for all images that are in the top level of a provided `root_path`, then creates albums with the names of these folders (if not yet exists) and adds the images to the correct albums.
|
||||
|
||||
It is important to understand the concept of `root_path`. Suppose you provide an external library to Immich under the path `/external_libs/photos`.
|
||||
The folder structure of `photos` might look like this:
|
||||
```
|
||||
/external_libs/photos/Birthdays/John
|
||||
/external_libs/photos/Birthdays/Jane
|
||||
/external_libs/photos/Skiing 2023
|
||||
/external_libs/photos/Vacation 2020 02
|
||||
```
|
||||
If you set `root_path` to `/external_libs/photos`, the script will create three albums:
|
||||
- Birthdays
|
||||
- Skiing 2023
|
||||
- Vacation 2020 02
|
||||
|
||||
All photos from John's and Jane's birthdays will be added to the `Birthdays` album.
|
||||
|
||||
If you set `root_path` to `/external_libs/photos/Birthdays`, only two albums will be created:
|
||||
- John
|
||||
- Jane
|
||||
|
||||
Since Immich does not support nested albums ([yet?](https://github.com/immich-app/immich/discussions/2073)), neither does this script.
|
||||
|
24
docker/immich_auto_album.sh
Normal file
24
docker/immich_auto_album.sh
Normal file
|
@ -0,0 +1,24 @@
|
|||
#!/usr/bin/env sh

# Wrapper around immich_auto_album.py: translates environment variables into
# command line arguments and always runs unattended (-u), so it is suitable
# for cron/container use.
arg_fetch_chunk_size=""
arg_chunk_size=""
arg_log_level=""

if [ -n "$FETCH_CHUNK_SIZE" ]; then
    arg_fetch_chunk_size="-C $FETCH_CHUNK_SIZE"
fi

if [ -n "$CHUNK_SIZE" ]; then
    arg_chunk_size="-c $CHUNK_SIZE"
fi

if [ -n "$LOG_LEVEL" ]; then
    arg_log_level="-l $LOG_LEVEL"
fi

BASEDIR=$(dirname "$0")
# The arg_* variables intentionally stay unquoted: each holds a "flag value"
# pair that must undergo word splitting (or expand to nothing when unset).
# The positional arguments are quoted so paths/URLs/keys containing spaces
# or shell metacharacters survive intact.
python3 "$BASEDIR"/immich_auto_album.py -u $arg_fetch_chunk_size $arg_chunk_size $arg_log_level "$ROOT_PATH" "$API_URL" "$API_KEY"
|
7
docker/setup_cron.sh
Normal file
7
docker/setup_cron.sh
Normal file
|
@ -0,0 +1,7 @@
|
|||
#!/usr/bin/env sh

# Register the album-creation wrapper script as a cron job when a
# CRON_EXPRESSION environment variable is provided.
if [ ! -z "$CRON_EXPRESSION" ]; then
    # Append to any existing crontab; redirect the job's output to PID 1's
    # stdout/stderr so cron runs show up in `docker logs`.
    (crontab -l 2>/dev/null; echo "$CRON_EXPRESSION /script/immich_auto_album.sh > /proc/1/fd/1 2>/proc/1/fd/2") | crontab -
    # Make environment variables accessible to cron
    printenv > /etc/environment
fi
|
159
immich_auto_album.py
Normal file
159
immich_auto_album.py
Normal file
|
@ -0,0 +1,159 @@
|
|||
import requests
|
||||
import os
|
||||
import argparse
|
||||
import logging
|
||||
import sys
|
||||
import datetime
|
||||
from collections import defaultdict
|
||||
|
||||
|
||||
# Command line interface: positional connection settings plus tuning options.
parser = argparse.ArgumentParser(description="Create Immich Albums from an external library path based on the top level folders", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("root_path", help="The external library's root path in Immich")
parser.add_argument("api_url", help="The root API URL of immich, e.g. https://immich.mydomain.com/api/")
parser.add_argument("api_key", help="The Immich API Key to use")
parser.add_argument("-u", "--unattended", action="store_true", help="Do not ask for user confirmation after identifying albums. Set this flag to run script as a cronjob.")
parser.add_argument("-c", "--chunk-size", default=2000, type=int, help="Maximum number of assets to add to an album with a single API call")
parser.add_argument("-C", "--fetch-chunk-size", default=5000, type=int, help="Maximum number of assets to fetch with a single API call")
parser.add_argument("-l", "--log-level", default="INFO", choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG'], help="Log level to use")
args = vars(parser.parse_args())

# Set up logger to log in logfmt format, with local-time ISO-8601 timestamps
# (millisecond precision) instead of the default asctime format.
logging.basicConfig(level=args["log_level"], stream=sys.stdout, format='time=%(asctime)s level=%(levelname)s msg=%(message)s')
logging.Formatter.formatTime = (lambda self, record, datefmt=None: datetime.datetime.fromtimestamp(record.created, datetime.timezone.utc).astimezone().isoformat(sep="T",timespec="milliseconds"))

root_path = args["root_path"]
root_url = args["api_url"]
api_key = args["api_key"]
number_of_images_per_request = args["chunk_size"]
number_of_assets_to_fetch_per_request = args["fetch_chunk_size"]
unattended = args["unattended"]
logging.debug("root_path = %s", root_path)
# Fixed: this previously logged root_path instead of root_url.
logging.debug("root_url = %s", root_url)
# NOTE(review): this prints the API key in clear text at DEBUG level;
# consider redacting it before enabling DEBUG in shared environments.
logging.debug("api_key = %s", api_key)
logging.debug("number_of_images_per_request = %d", number_of_images_per_request)
logging.debug("number_of_assets_to_fetch_per_request = %d", number_of_assets_to_fetch_per_request)
logging.debug("unattended = %s", unattended)
|
||||
|
||||
def divide_chunks(l, n):
    """Yield successive chunks of *l*, each holding at most *n* elements.

    The final chunk may be shorter when ``len(l)`` is not a multiple of *n*.
    """
    # Walk the list in strides of n and slice out one chunk per step.
    for start in range(0, len(l), n):
        yield l[start:start + n]
|
||||
|
||||
|
||||
# Common keyword arguments for every Immich API request (auth + JSON headers).
requests_kwargs = {
    'headers' : {
        'x-api-key': api_key,
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
}

# Normalize both values to end with a trailing slash so later prefix
# stripping and URL concatenation are well-defined. Using endswith()
# instead of indexing [-1] also avoids an IndexError on an empty string.
if not root_path.endswith('/'):
    root_path = root_path + '/'
if not root_url.endswith('/'):
    root_url = root_url + '/'
|
||||
|
||||
logging.info("Requesting all assets")
assets = []
# Initial API call, let's fetch our first chunk
r = requests.get(root_url+'asset?take='+str(number_of_assets_to_fetch_per_request), **requests_kwargs)
assert r.status_code == 200
logging.debug("Received %s assets with chunk 1", len(r.json()))
assets = assets + r.json()

# If we got a full chunk size back, let's perform subsequent calls until we get less than a full chunk size
skip = 0
while len(r.json()) == number_of_assets_to_fetch_per_request:
    skip += number_of_assets_to_fetch_per_request
    r = requests.get(root_url+'asset?take='+str(number_of_assets_to_fetch_per_request)+'&skip='+str(skip), **requests_kwargs)
    # Some Immich versions ignore take/skip and always return the full asset
    # list; if the second "page" equals the first, we already have everything.
    if skip == number_of_assets_to_fetch_per_request and assets == r.json():
        logging.info("Non-chunked Immich API detected, stopping fetching assets since we already got all in our first call")
        break
    assert r.status_code == 200
    logging.debug("Received %s assets with chunk", len(r.json()))
    assets = assets + r.json()
logging.info("%d photos found", len(assets))
|
||||
|
||||
|
||||
|
||||
logging.info("Sorting assets to corresponding albums using folder name")
# Map target album name -> list of asset IDs belonging to it.
album_to_assets = defaultdict(list)
for asset in assets:
    asset_path = asset['originalPath']
    # Only consider assets below root_path. A prefix test (instead of the
    # previous substring test) prevents paths that merely *contain*
    # root_path somewhere in the middle from matching.
    if not asset_path.startswith(root_path):
        continue
    # The first folder level below root_path becomes the album name.
    # Slicing off the prefix (instead of str.replace) avoids also
    # rewriting later occurrences of root_path within the path.
    album_name = asset_path[len(root_path):].split('/')[0]
    # Check that the extracted album name is not actually a file name in root_path
    if not asset_path.endswith(album_name):
        album_to_assets[album_name].append(asset['id'])

# Sort albums alphabetically by name for stable, readable output.
album_to_assets = {k:v for k, v in sorted(album_to_assets.items(), key=(lambda item: item[0]))}

logging.info("%d albums identified", len(album_to_assets))
logging.info("Album list: %s", list(album_to_assets.keys()))
# In interactive mode, give the user a chance to abort before any
# albums are created on the server.
if not unattended:
    print("Press Enter to continue, Ctrl+C to abort")
    input()
|
||||
|
||||
|
||||
album_to_id = {}

logging.info("Listing existing albums on immich")
# Fetch all existing albums once so creation below can skip duplicates.
r = requests.get(root_url+'album', **requests_kwargs)
assert r.status_code == 200
albums = r.json()
# Map album name -> album UUID for quick lookup.
album_to_id = {album['albumName']:album['id'] for album in albums }
logging.info("%d existing albums identified", len(albums))
|
||||
|
||||
|
||||
logging.info("Creating albums if needed")
# cpt counts albums created in this run (for the summary log line).
cpt = 0
for album in album_to_assets:
    # Skip albums that already exist on the server.
    if album in album_to_id:
        continue
    data = {
        'albumName': album,
        'description': album
    }
    r = requests.post(root_url+'album', json=data, **requests_kwargs)
    assert r.status_code in [200, 201]
    # Remember the new album's id so assets can be added to it below.
    album_to_id[album] = r.json()['id']
    logging.info('Album %s added!', album)
    cpt += 1
logging.info("%d albums created", cpt)
|
||||
|
||||
|
||||
logging.info("Adding assets to albums")
# Note: Immich manages duplicates without problem, so we can add all assets
# to the same album on every run; no photo will be duplicated.
for album, assets in album_to_assets.items():
    # Renamed from `id` to avoid shadowing the builtin id().
    album_id = album_to_id[album]

    # Divide our assets into chunks of number_of_images_per_request,
    # so the API can cope.
    assets_chunked = list(divide_chunks(assets, number_of_images_per_request))
    for assets_chunk in assets_chunked:
        data = {'ids':assets_chunk}
        r = requests.put(root_url+f'album/{album_id}/assets', json=data, **requests_kwargs)
        if r.status_code not in [200, 201]:
            # Log the failure through the logger (previously bare print
            # calls) and continue with the remaining chunks/albums.
            logging.error("Error adding assets to album %s: %s (payload: %s)", album, r.json(), data)
            continue
        response = r.json()

        # Count how many assets were newly added in this chunk; entries
        # flagged as 'duplicate' were already in the album and are fine.
        cpt = 0
        for res in response:
            if not res['success']:
                if res['error'] != 'duplicate':
                    logging.warning("Error adding an asset to an album: %s", res['error'])
            else:
                cpt += 1
        if cpt > 0:
            logging.info("%d new assets added to %s", cpt, album)

logging.info("Done!")
|
1
requirements.txt
Normal file
1
requirements.txt
Normal file
|
@ -0,0 +1 @@
|
|||
requests
|
Loading…
Add table
Reference in a new issue