diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index d9c8d81..b90dd75 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,9 +1,22 @@
 version: 2
 updates:
-  - package-ecosystem: "pip" # See documentation for possible values
-    directory: "/" # Location of package manifests
+  - package-ecosystem: "pip"
+    directory: "/"
     schedule:
       interval: "weekly"
     commit-message:
       prefix: "Poetry"
       include: "scope"
+    ignore:
+      - dependency-name: "*"
+        update-types: ["version-update:semver-patch"]
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    commit-message:
+      prefix: "GitHub Actions"
+      include: "scope"
+    ignore:
+      - dependency-name: "*"
+        update-types: ["version-update:semver-minor"]
diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml
index 6de48b7..7e9362d 100644
--- a/.github/workflows/docs.yml
+++ b/.github/workflows/docs.yml
@@ -9,13 +9,13 @@ jobs:
   deploy:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4
 
       - name: Install poetry
         run: pipx install poetry
 
      - name: Setup Python
-        uses: actions/setup-python@v3.1.3
+        uses: actions/setup-python@v4.7.1
        with:
          python-version: '3.x'
          cache: poetry
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000..ba96af8
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,29 @@
+FROM python:3.11-bookworm as pre-build
+
+ENV POETRY_HOME=/opt/poetry
+RUN curl -sSL https://install.python-poetry.org | python3 -
+
+FROM pre-build as build
+
+WORKDIR /app
+
+COPY ./pyproject.toml ./
+COPY ./poetry.lock ./
+
+RUN $POETRY_HOME/bin/poetry install
+
+COPY ./mkdocs.yml ./
+COPY ./docs ./docs
+COPY ./README.md ./docs/README.md
+
+RUN $POETRY_HOME/bin/poetry run mkdocs build
+
+
+FROM python:3.11-alpine as runtime
+
+WORKDIR /app
+
+COPY --from=build ./app/site ./
+
+ENTRYPOINT [ "python3" ]
+CMD [ "-m", "http.server", "8000" ]
\ No newline at end of file
diff --git a/docs/containers/docker.md b/docs/containers/docker.md
index 1b5b404..f738a75 100644
--- a/docs/containers/docker.md
+++ b/docs/containers/docker.md
@@ -63,7 +63,7 @@ VMs incur a lot of overhead beyond what is being consumed by your application lo
 
 ### [`docker run`](https://docs.docker.com/engine/reference/commandline/run/)
 
-```sh
+```sh linenums="1"
 docker run                   # run selected app inside a container (downloaded from Docker Hub if missing from image)
 docker run -d|--detach       # run docker container in the background (does not occupy stdout & stderr)
 docker run -i|--interactive  # run docker container in interactive mode (read stdin)
 docker run -t|--tty          # run docker container allocating a pseudo-TTY (show prompts)
 docker run -p|--publish <host-port>:<container-port>   # map docker container port to host port
 docker run -v|--volume <host-dir>:<container-dir>      # map host directory to container directory (bind mount)
 docker run -e|--env <KEY>=<value>                      # set environment variables
 docker run --name=<container-name>                     # set container name
@@ -80,23 +80,24 @@
 ### [`docker container`](https://docs.docker.com/engine/reference/commandline/container/)
 
-```sh
+```sh linenums="1"
 docker container ls            # list of currently running containers
 docker container ls -a|--all   # list of all containers, running and exited
 docker container rm            # remove one or more containers
 docker container prune         # remove stopped containers
-
 docker container inspect       # full details about a container
 docker container logs          # see container logs
 docker container stop          # stop a running container
 docker container start         # start a stopped container
+
+docker container exec          # exec a command inside a container
 ```
 
 ### [`docker image`](https://docs.docker.com/engine/reference/commandline/image/)
 
-```sh
+```sh linenums="1"
 docker image ls       # list of existing images
 docker image rm       # remove one or more images
 docker image prune    # remove unused images
@@ -105,20 +106,20 @@ docker image pull    # download an image w/o starting the container
 
 ### [`docker build`](https://docs.docker.com/engine/reference/commandline/build/)
 
-```sh
+```sh linenums="1"
 docker build -t <name:tag> -f <dockerfile>                        # build image with specific tag (usually user/app:version)
 docker build -t <name:tag> -f <dockerfile> --build-arg ARG=value  # pass args to ARG steps
 ```
 
 ### [`docker push`](https://docs.docker.com/engine/reference/commandline/push/)
 
-```sh
+```sh linenums="1"
 docker push    # publish image to registry (defaults to Docker Hub)
 ```
 
 ## [Dockerfile](https://docs.docker.com/engine/reference/builder/)
 
-```docker
+```docker linenums="1"
 # starting image or scratch
 FROM <image>:<tag>
@@ -146,41 +147,12 @@
 CMD
 ENTRYPOINT
 ```
 
-### `CMD`
+### `CMD` vs `ENTRYPOINT`
 
-Used to provide all the default scenarios which can be overridden.
+`CMD` is used to provide all the default scenarios which can be overridden. *Anything* defined in CMD can be overridden by passing arguments in the `docker run` command.
 
-#### Default executable
-
-This instructions is used to define a default executable for a container to execute.
-
-If you want to create a generic docker image, where users can pass any supported command to be executed on container invocation, then this instruction is the one to use.
-
-Entrypoint instruction should not be defined in Dockerfile for this use case.
-
-```docker
-CMD ["executable", "arg1", "arg2"]
-```
-
-#### Default arguments
-
-For this use case, we don’t specify executable in this instruction at all, but simply define some arguments which are used as default/additional
-arguments for executable defined in the entrypoint instruction.
-
-Thus, entrypoint instruction is required in dockerfile for this use case to define an executable.
-
-```docker
-ENTRYPOINT ["executable"]
-CMD ["arg1", "arg2"]
-```
-
-> **Note**: Anything defined in CMD can be overridden by passing arguments in `docker run` command.
-
-### `ENTRYPOINT`
-
-Used to define specific executable and arguments to be executed during container invocation which cannot be overridden.
-
-This is used to constraint the user to execute anything else. User can however define arguments to be passed in the executable by adding them in the `docker run` command.
+`ENTRYPOINT` is used to define a specific executable (and its arguments) to be executed during container invocation which cannot be overridden.
+The user can however define arguments to be passed to the executable by adding them to the `docker run` command.
 
 ## [Docker Multi-Stage Build](https://docs.docker.com/develop/develop-images/multistage-build/)
 
@@ -188,7 +160,7 @@ With multi-stage builds, it's possible to use multiple `FROM` statements in the
 It's possible to selectively copy artifacts from one stage to another, leaving behind everything not wanted in the final image.
 
-```docker
+```docker linenums="1"
 FROM <image>:<tag> AS <build-stage>
 
 RUN    # install external dependencies (apt get ...)
@@ -214,7 +186,7 @@ COPY --from=
 CMD ["executable"]    # run app
 ```
 
-```docker
+```docker linenums="1"
 FROM mcr.microsoft.com/dotnet/<image>:<tag> AS runtime
 
 RUN    # install external dependencies (apt get ...)
@@ -247,7 +219,7 @@ ENTRYPOINT ["dotnet", ".dll"]
 
 Starting container networks: `bridge` (default), `none`, `host`.
 
-```sh
+```sh linenums="1"
 docker run --network=none/host    # specify a non-default network to be used
 docker network ls                 # list all available networks
 ```
@@ -261,7 +233,7 @@ None: Containers are not attached to a network and cannot access other container
 
 ## User-defined Networks
 
-```sh
+```sh linenums="1"
 docker network create \
   --driver NETWORK_TYPE \
   --subnet GATEWAY_TP/SUBNET_MASK_SIZE
@@ -278,7 +250,7 @@ Docker has an internal DNS that allows finding other container by their name ins
 
 ## File System
 
-```sh
+```sh linenums="1"
 /var/lib/docker
 |_<storage-driver>
 |_containers
@@ -298,7 +270,7 @@ To modify a file during while the container runs docker creates a local copy in
 
 **volume mounting**: create a volume under the docker installation folder (`/var/lib/docker/volumes/`).
 **bind mounting**: link docker to an exiting folder to be used as a volume.
 
-```sh
+```sh linenums="1"
 docker run -v <host-dir>:<container-dir> <image>:<tag>                             # older command for bind mounting
 docker run --mount type=bind, source=<host-dir>, target=<container-dir> <image>:<tag>   # modern command for bind mounting
 ```
@@ -315,7 +287,7 @@ Using Compose is basically a three-step process:
 2. Define the services that make up your app in `docker-compose.yml` so they can be run together in an isolated environment.
 3. Run `docker-compose up` and Compose starts and runs the entire app.
 
-```yaml
+```yaml linenums="1"
 version: 3.x
 services:
   <service-name>:  # service name
    build:
@@ -327,7 +299,7 @@
      context: .  # folder containing the dockerfile
      dockerfile: <*.Dockerfile>
    args:  # pass args to dockerfile
      ARG:
-      - ARG=
+      - ARG=
    ports:
      - <host-port>:<container-port>
    networks:  # attach container to one or more networks
      - <network-name>
@@ -338,10 +310,18 @@
    environment:  # set env vars
      ENV_VAR:
      - ENV_VAR=
    env_file:
-      -   # reusable env file
+      -   # reusable env file
    volumes:
      - "./<local-path>:<container-path>"   # service-dedicated volume
      - "<volume-name>:<container-path>"    # reuseable volume
+    healthcheck:
+      disable:  # set to true to disable
+      test: curl -f http://localhost  # set to ["NONE"] to disable
+      interval:  # interval between checks (default 30s)
+      timeout:  # check fail timeout (default 30s)
+      retries:  # num of retries before unhealthy (default 3)
+      start_period:  # container init grace period (default 5s)
+      start_interval:  # check interval in start period
 
 # reusable volume definitions
 volumes:
diff --git a/docs/containers/kubernetes.md b/docs/containers/kubernetes.md
index 0d790ea..e600461 100644
--- a/docs/containers/kubernetes.md
+++ b/docs/containers/kubernetes.md
@@ -96,7 +96,7 @@ As pods successfully complete, the Job tracks the successful completions. When a
 
 ## Kubernetes Configuration
 
-Each kubernetes configuration file is composed by 3 parts:
+Each kubernetes configuration file is composed of 3 parts:
 
 - metadata
 - specification
@@ -110,7 +110,7 @@
 
 ### `kubectl get`
 
-```sh
+```sh linenums="1"
 kubectl config get-contexts    # list available contexts
 kubectl get namespaces         # list namespaces inside current context
 
 kubectl get pod [-n|--namespace <namespace>] -o|--output jsonpath='{.spec.<field>}'
 ```
@@ -121,11 +121,12 @@
 
 ### `kubectl exec`
 
-```sh
+```sh linenums="1"
 kubectl exec [-i|--stdin] [-t|--tty] [-n|--namespace <namespace>] [-c|--container <container>] -- <command>    # execute a command inside a container
 ```
+
 ### `kubectl logs`
 
-```sh
+```sh linenums="1"
 kubectl logs [-f|--follow] [-n|--namespace <namespace>] [-c|--container]    # get pod/container logs
-```
\ No newline at end of file
+```
diff --git a/docs/databases/mongo-db.md b/docs/databases/mongo-db.md
index db214fc..cf8e586 100644
--- a/docs/databases/mongo-db.md
+++ b/docs/databases/mongo-db.md
@@ -25,7 +25,7 @@ MongoDB automatically creates an `ObjectId()` if it's not provided.
 
 To create a database is sufficient to switch towards a non existing one with `use <database>` (implicit creation). The database is not actually created until a document is inserted.
 
-```sh
+```sh linenums="1"
 show dbs            # list all databases
 use <database>      # use a particular database
 show collections    # list all collection for the current database
@@ -38,7 +38,7 @@ db.<collection>.insertOne({document})  # implicit collection creation
 
 ## Operators (MQL Syntax)
 
-```json
+```json linenums="1"
 /* --- Update operators --- */
 { "$inc":  { "": "", ... } }  // Increment value
 { "$set":  { "": "", ... } }  // Set value
@@ -79,7 +79,7 @@
 
 > **Note**: `$` is used to access the value of the field dynamically
 
-```json
+```json linenums="1"
 { "$expr": { "<expression>" } }  // aggregation expression, variables, conditional expressions
 { "$expr": { "$<operator>": [ "$<field>", "$<field>" ] } }  // compare field values (operators use aggregation syntax)
 ```
@@ -95,7 +95,7 @@ Insertion results:
 
 - error -> rollback
 - success -> entire documents gets saved
 
-```sh
+```sh linenums="1"
 # explicit collection creation, all options are optional
 db.createCollection( <collection>, {
@@ -128,7 +128,7 @@ db.<collection>.insertMany([ { document }, { document } ] , { "ordered": false }
 
 ### Querying
 
-```sh
+```sh linenums="1"
 db.<collection>.findOne()         # find only one document
 db.<collection>.find(filter)      # show selected documents
 db.<collection>.find().pretty()   # show documents formatted
@@ -172,7 +172,7 @@ db.<collection>.find().hint( { $natural : -1 } )  # force the query to perform a
 
 [Update Operators](https://docs.mongodb.com/manual/reference/operator/update/ "Update Operators Documentation")
 
-```sh
+```sh linenums="1"
 db.<collection>.replaceOne(filter, update, options)
 db.<collection>.updateOne(filter, update, {upsert: true})  # modify document if existing, insert otherwise
@@ -181,7 +181,7 @@ db.<collection>.updateOne(filter, { "$push": { ... }, "$set": { ... }, { "$inc":
 
 ### Deletion
 
-```sh
+```sh linenums="1"
 db.<collection>.deleteOne(filter, options)
 db.<collection>.deleteMany(filter, options)
@@ -199,7 +199,7 @@ Utility to import all docs into a specified collection.
 
 If the collection already exists `--drop` deletes it before reuploading it.
 
 **WARNING**: CSV separators must be commas (`,`)
 
-```sh
+```sh linenums="1"
 mongoimport
   --uri=<connection string>
@@ -218,7 +218,7 @@ mongoimport
 
 Utility to export documents into a specified file.
 
-```sh
+```sh linenums="1"
 mongoexport --collection=<collection>
   --uri=<connection string>
@@ -272,7 +272,7 @@ Indexes _slow down writing operations_ since the index must be updated at every
 
 ### Diagnosis and query planning
 
-```sh
+```sh linenums="1"
 db.<collection>.find({...}).explain()                     # explain won't accept other functions
 db.explain().<collection>.find({...})                     # can accept other functions
 db.explain("executionStats").<collection>.find({...})     # more info
@@ -280,7 +280,7 @@
 
 ### Index Creation
 
-```sh
+```sh linenums="1"
 db.<collection>.createIndex( <keys>, <options> )
 db.<collection>.createIndex( { "<field>": <type>, "<field>": <type>, ... } )  # normal, compound or multikey (field is array) index
 
 db.<collection>.createIndex(
@@ -302,7 +302,7 @@
 
 ### [Index Management](https://docs.mongodb.com/manual/tutorial/manage-indexes/)
 
-```sh
+```sh linenums="1"
 # view all db indexes
 db.getCollectionNames().forEach(function(collection) {
   indexes = db[collection].getIndexes();
@@ -339,7 +339,7 @@ handling connections, requests and persisting the data.
 
 ### Basic Shell Helpers
 
-```sh
+```sh linenums="1"
 db.<method>()                 # database interaction
 db.<collection>.<method>()    # collection interaction
 rs.<method>();                # replica set deployment and management
@@ -378,7 +378,7 @@ Log Verbosity Level:
 
 - `0`: Default Verbosity (Information)
 - `1 - 5`: Increases the verbosity up to Debug messages
 
-```sh
+```sh linenums="1"
 db.getLogComponents()                        # get components and their verbosity
 db.adminCommand({"getLog": ""})              # retrieve logs (getLog must be run on admin db -> adminCommand)
 db.setLogLevel(<level>, "<component>");      # set log level (output is OLD verbosity levels)
@@ -404,7 +404,7 @@ Events captured by the profiler:
 
 > **Note**: Logs are saved in the `system.profile` _capped_ collection.
 
-```sh
+```sh linenums="1"
 db.setProfilingLevel(n)                  # set profiler level
 db.setProfilingLevel(1, { slowms: <ms> })
 db.getProfilingStatus()                  # check profiler status
@@ -454,7 +454,7 @@ Built-in Roles Groups and Names:
 
 - Backup/Restore: `backup`, `restore`
 - Super User: `root`
 
-```sh
+```sh linenums="1"
 db.createUser(
   {
     user: "<username>",
@@ -509,7 +509,7 @@ Variable syntax in aggregations:
 
 Filters the documents to pass only the documents that match the specified condition(s) to the next pipeline stage.
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$match": { "<query>" } },
@@ -541,7 +541,7 @@ Passes along the documents with the requested fields to the next stage in the pi
 
 - [`$sum`][$sum_docs]
 - [`$avg`][$avg_docs]
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$project": {
@@ -598,7 +598,7 @@
 
 Adds new fields to documents (can be result of computation).
 `$addFields` outputs documents that contain _all existing fields_ from the input documents and newly added fields.
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate({
   { $addFields: { <new-field>: <expression>, ... } }
 })
@@ -610,7 +610,7 @@
 
 The $`group` stage separates documents into groups according to a "group key". The output is one document for each unique group key.
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$group": {
@@ -629,7 +629,7 @@
 
 Deconstructs an array field from the input documents to output a document for each element.
 Each output document is the input document with the value of the array field replaced by the element
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$unwind": "<array-field>" }
@@ -647,7 +647,7 @@
 
 ### [`$count` Aggregation Stage][$count_docs]
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$count": "<count-field>" }
 ])
@@ -657,7 +657,7 @@
 
 ### [`$sort` Aggregation Stage][$sort_docs]
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$sort": {
@@ -676,7 +676,7 @@
 
 ### [`$skip` Aggregation Stage][$skip_docs]
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$skip": "<count>" }
 ])
@@ -686,7 +686,7 @@
 
 ### [`$limit` Aggregation Stage][$limit_docs]
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$limit": "<count>" }
 ])
@@ -701,7 +701,7 @@ The `$lookup` stage adds a new array field to each input document. The new array
 
 > **Note**: To combine elements from two different collections, use the [`$unionWith`][$unionWith_docs] pipeline stage.
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { "$lookup": {
@@ -722,9 +722,9 @@
 
 Performs a recursive search on a collection, with options for restricting the search by recursion depth and query filter.
 
-The connection between documents follows `.` => `.`. The collection on which the aggregation is performed and the `from` collection can be the same (in-collection search) or different (cross-collection search)
+The collection on which the aggregation is performed and the `from` collection can be the same (in-collection search) or different (cross-collection search).
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   {
     $graphLookup: {
@@ -754,7 +754,7 @@ Each output document contains two fields: an `_id` field containing the distinct
 
 The documents are sorted by count in descending order.
 
-```sh
+```sh linenums="1"
 db.<collection>.aggregate([
   { $sortByCount: <expression> }
 ])
diff --git a/docs/databases/redis.md b/docs/databases/redis.md
index a130b89..df1436e 100644
--- a/docs/databases/redis.md
+++ b/docs/databases/redis.md
@@ -10,14 +10,14 @@ Often Redis it is called a *data structure* server because it has outer key-valu
 
 ### Server Startup
 
-```bash
+```bash linenums="1"
 redis-server    # start the server
 redis-cli
 ```
 
 ### [Key-Value Pairs](https://redis.io/commands#generic)
 
-```sh
+```sh linenums="1"
 SET <key> <value> [ EX <seconds> ]    # store a key-value pair, TTL optional
 GET       # read a key content
 EXISTS    # check if a key exists
@@ -40,7 +40,7 @@ PERSIST    # make the key permanent
 
 A list is a series of ordered values.
 
-```sh
+```sh linenums="1"
 RPUSH ...    # add one or more values to the end of the list
 LPUSH ...    # add one or more values to the start of a list
@@ -55,7 +55,7 @@ RPOP    # remove and return the last item fro the list
 
 A set is similar to a list, except it does not have a specific order and each element may only appear once.
 
-```sh
+```sh linenums="1"
 SADD ...    # add one or more values to the set (return 0 if values are already inside)
 SREM        # remove the given member from the set, return 1 or 0 to signal if the member was actually there or not.
 SPOP        # remove and return value from the set
@@ -72,7 +72,7 @@ Sets are a very handy data type, but as they are unsorted they don't work well f
 
 A sorted set is similar to a regular set, but now each value has an associated score. This score is used to sort the elements in the set.
 
-```sh
+```sh linenums="1"
 ZADD      # add a value with it's score
 ZRANGE    # return a subset of the sortedSet
@@ -84,7 +84,7 @@ ZRANGE    # return a subset of the sortedSet
 
 Hashes are maps between string fields and string values, so they are the perfect data type to represent objects.
 
-```sh
+```sh linenums="1"
 HSET [ ... ]    # set the string of a hash field
 HSETNX          # set the value of a hash field, only if the field does not exist
diff --git a/docs/databases/sql.md b/docs/databases/sql.md
index 1870cbb..3504865 100644
--- a/docs/databases/sql.md
+++ b/docs/databases/sql.md
@@ -2,7 +2,7 @@
 
 ## DDL
 
-```sql
+```sql linenums="1"
 show databases;                 -- show databases
 CREATE DATABASE <database>;     -- database creation
 use <database>;                 -- use a particular database
 show tables;                    -- show the database's tables
@@ -16,7 +16,7 @@
 
 ### Table Creation
 
-```sql
+```sql linenums="1"
 CREATE TABLE (