From add5805b912f56706683497ee6eca324fd3cff03 Mon Sep 17 00:00:00 2001 From: "D. Arnodo" <124083497+darnodo@users.noreply.github.com> Date: Tue, 25 Feb 2025 19:22:12 +0100 Subject: [PATCH] Start dev (#4) * Add Netbox configuration and plugins * Add Containerlab topology * Add template * Update Documentation --- .devcontainer/devcontainer.json | 19 + .gitignore | 11 +- README.md | 65 +-- containerlab/fabric_vxlan.yml | 86 ++++ containerlab/lab_definition.yml | 64 --- containerlab/network_images/network-images.md | 4 + documentation/CookBook.md | 18 + documentation/INSTALLATION.md | 125 ++++- .../assets/images/diagrams/VXLAN.drawio | 486 +++++++++++++----- netbox/Dockerfile | 127 ----- netbox/configuration/configuration.py | 318 ------------ netbox/configuration/extra.py | 49 -- netbox/configuration/ldap/extra.py | 28 - netbox/configuration/ldap/ldap_config.py | 111 ---- netbox/configuration/logging.py | 55 -- netbox/configuration/plugins.py | 13 - netbox/docker-compose.override.yml | 5 - netbox/docker-compose.yml | 90 ---- netbox/docker/configuration.docker.py | 91 ---- netbox/docker/docker-entrypoint.sh | 99 ---- netbox/docker/housekeeping.sh | 8 - netbox/docker/launch-netbox.sh | 57 -- netbox/docker/ldap_config.docker.py | 23 - netbox/docker/nginx-unit.json | 57 -- netbox/env/netbox.env | 34 -- netbox/env/postgres.env | 3 - netbox/env/redis-cache.env | 1 - netbox/env/redis.env | 1 - pyproject.toml | 105 ++++ templates/leaves.j2 | 44 ++ templates/spines.j2 | 44 ++ utilities/Create_Fabric/add_customers.py | 68 +++ .../Create_Fabric/create_vxlan_fabric.py | 343 ++++++++++++ utilities/Create_Fabric/helpers/__init__.py | 0 .../Create_Fabric/helpers/netbox_backend.py | 226 ++++++++ utilities/Devices/devices_model.yml | 45 ++ utilities/IPAM/subnets.yml | 14 + utilities/VPN/Customers.yml | 9 + utilities/import.py | 340 ++++++++++++ uv.lock | 330 ++++++++++++ 40 files changed, 2185 insertions(+), 1431 deletions(-) create mode 100644 .devcontainer/devcontainer.json create 
mode 100755 containerlab/fabric_vxlan.yml delete mode 100755 containerlab/lab_definition.yml create mode 100644 containerlab/network_images/network-images.md create mode 100644 documentation/CookBook.md delete mode 100755 netbox/Dockerfile delete mode 100755 netbox/configuration/configuration.py delete mode 100755 netbox/configuration/extra.py delete mode 100755 netbox/configuration/ldap/extra.py delete mode 100755 netbox/configuration/ldap/ldap_config.py delete mode 100755 netbox/configuration/logging.py delete mode 100755 netbox/configuration/plugins.py delete mode 100755 netbox/docker-compose.override.yml delete mode 100755 netbox/docker-compose.yml delete mode 100755 netbox/docker/configuration.docker.py delete mode 100755 netbox/docker/docker-entrypoint.sh delete mode 100755 netbox/docker/housekeeping.sh delete mode 100755 netbox/docker/launch-netbox.sh delete mode 100755 netbox/docker/ldap_config.docker.py delete mode 100755 netbox/docker/nginx-unit.json delete mode 100755 netbox/env/netbox.env delete mode 100755 netbox/env/postgres.env delete mode 100755 netbox/env/redis-cache.env delete mode 100755 netbox/env/redis.env create mode 100644 pyproject.toml create mode 100644 templates/leaves.j2 create mode 100644 templates/spines.j2 create mode 100644 utilities/Create_Fabric/add_customers.py create mode 100644 utilities/Create_Fabric/create_vxlan_fabric.py create mode 100644 utilities/Create_Fabric/helpers/__init__.py create mode 100644 utilities/Create_Fabric/helpers/netbox_backend.py create mode 100644 utilities/Devices/devices_model.yml create mode 100644 utilities/IPAM/subnets.yml create mode 100644 utilities/VPN/Customers.yml create mode 100644 utilities/import.py create mode 100644 uv.lock diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..e010018 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,19 @@ +{ + "image": "ghcr.io/srl-labs/containerlab/devcontainer-dind:latest", + 
"forwardPorts": [ + 50080, + 5001 + ], + "customizations": { + "vscode": { + "extensions": [ + "ms-azuretools.vscode-docker", + "redhat.vscode-yaml", + "srl-labs.vscode-containerlab", + "hediet.vscode-drawio", + "DavidAnson.vscode-markdownlint" + ] + } + }, + "postCreateCommand": "sudo mkdir -p /opt/edgeshark && sudo curl -sL https://github.com/siemens/edgeshark/raw/main/deployments/wget/docker-compose.yaml -o /opt/edgeshark/docker-compose.yaml" +} \ No newline at end of file diff --git a/.gitignore b/.gitignore index 4e3a9b1..0734728 100755 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,12 @@ .DS_Store documentation/assets/images/diagrams/.$VXLAN.drawio.bkp clab-vxlan-evpn-l2/* -containerlab/.lab_definition.yml.bak -containerlab/clab-vxlan-evpn-l2 \ No newline at end of file +netbox/ +.env +.venv +.python-version +containerlab/clab-vxlan-evpn-l2 +containerlab/.lab_vxlan.yml.bak +.vscode/settings.json +vxlan_automation.egg-info +__pycache__ diff --git a/README.md b/README.md index 51d2f4b..0f7ddc1 100755 --- a/README.md +++ b/README.md @@ -1,45 +1,38 @@ # VXLAN EVPN Automation Project -This project aims to automate the creation and management of a VXLAN EVPN test lab using ContainerLab, Arista cEOS, Nokia SRLinux, and Netbox. The automation is primarily achieved through Ansible and Python scripts. +> [!WARNING] +> Work in progress +> -🖋️ **_NOTE_**: The environment used is Debian 12: - -```bash -Distributor ID: Debian -Description: Debian GNU/Linux 12 (bookworm) -Release: 12 -Codename: bookworm -``` +This project aims to automate the creation and management of a VXLAN EVPN test lab using ContainerLab, Arista cEOS and Netbox 4.2. +The automation is primarily achieved through Netbox Render Config and Python scripts. ## Table of Contents 1. [Prerequisites](#prerequisites) 2. [Installation](#installation) 3. [Usage](#usage) -4. [Project Structure](#project-structure) -5. [Contributions](#contributions) -6. [License](#license) -7. [Sources](#sources) +4. 
[Sources](#sources) ## Prerequisites - Docker, ContainerLab, and Ansible installed. - Images for Arista cEOS, Nokia SRLinux, and Linux Alpine downloaded. -- Python 3.11 with the necessary libraries installed (see `requirements.txt`). +- Python 3.13 with the necessary libraries installed (see `requirements.txt`). ## Installation 1. **Clone the Repository**: ```bash - git clone https://github.com/MasqAs/projet-vxlan-automation.git + git clone https://github.com/darnodo/projet-vxlan-automation.git cd vxlan-evpn-automation-project ``` 2. **Install Python Dependencies**: ```bash - pip install -r requirements.txt + uv sync ``` 3. **Install Depedencies**: @@ -52,42 +45,18 @@ Codename: bookworm ## Usage -1. **Set Up the Lab**: +- **Set Up Lab**: - ```bash - sudo containerlab deploy --topo containerlab/lab_definition.yml - ``` + ```bash + sudo containerlab deploy --topo containerlab/fabric_vxlan.yml + ``` -2. **Configure Netbox**: +- **Set Up Netbox**: - ```bash - ansible-playbook ansible/playbooks/deploy_netbox.yml - ``` - -3. **(Additional Steps)**: - - Follow the additional instructions in `documentation/USAGE.md`. - -## Project Structure - -- `/ansible/` - Contains all Ansible playbooks, roles, variables, and inventories. -- `/python-scripts/` - Python scripts for various tasks. -- `/containerlab/` - Definitions and configurations for ContainerLab. -- `/configs/` - Initial configurations for network equipment. -- `/documentation/` - Detailed project documentation. -- `/suzieq/` - Files specific to SuzieQ. - -For more details, please refer to `documentation/STRUCTURE.md`. - -## Contributions - -Contributions are welcome! Please submit pull requests or open issues for any suggestions or corrections. - -## License - -This project is licensed under the APACHE license. See the [LICENSE](LICENSE) file for more information. 
+ All details on installation [documentation](./documentation/INSTALLATION.md#install-netbox-and-plugins) ## Sources - [ContainerLab](https://containerlab.dev/) -- [The ASCII Construct](https://www.theasciiconstruct.com/post/multivendor-evpn-vxlan-l2-overlay/) \ No newline at end of file +- [NetBox Docker Plugin](https://github.com/netbox-community/netbox-docker/wiki/Using-Netbox-Plugins) +- [Vector Netbox](https://www.vectornetworksllc.com/post/generating-network-device-configurations-from-netbox) diff --git a/containerlab/fabric_vxlan.yml b/containerlab/fabric_vxlan.yml new file mode 100755 index 0000000..bf4f3f1 --- /dev/null +++ b/containerlab/fabric_vxlan.yml @@ -0,0 +1,86 @@ +name: vxlan-evpn-l2 +mgmt: + network: management +topology: + nodes: + padc_sp1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.10 + padc_sp2_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.11 + pa01_lf1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.100 + pa02_lf2_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.101 + pa03_lf3_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.102 + pa04_lf4_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.103 + pa01_sw1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.110 + pa02_sw1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.111 + pa03_sw1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.112 + pa04_sw1_00: + kind: ceos + image: ceos:4.33.1F + mgmt-ipv4: 172.20.20.113 + host1: + kind: linux + image: alpine:latest + binds: + - hosts/h1_interfaces:/etc/network/interfaces + mgmt-ipv4: 172.20.20.21 + host2: + kind: linux + image: alpine:latest + binds: + - hosts/h2_interfaces:/etc/network/interfaces + mgmt-ipv4: 172.20.20.22 + host3: + kind: linux + image: alpine:latest + binds: + - hosts/h3_interfaces:/etc/network/interfaces + mgmt-ipv4: 172.20.20.23 + host4: + kind: linux + image: alpine:latest + binds: + - 
hosts/h4_interfaces:/etc/network/interfaces + mgmt-ipv4: 172.20.20.24 + links: + - endpoints: ["pa01_lf1_00:eth1", "padc_sp1_00:eth1"] + - endpoints: ["pa01_lf1_00:eth2", "padc_sp2_00:eth1"] + - endpoints: ["pa02_lf2_00:eth1", "padc_sp1_00:eth2"] + - endpoints: ["pa02_lf2_00:eth2", "padc_sp2_00:eth2"] + - endpoints: ["pa03_lf3_00:eth1", "padc_sp1_00:eth3"] + - endpoints: ["pa03_lf3_00:eth2", "padc_sp2_00:eth3"] + - endpoints: ["pa04_lf4_00:eth1", "padc_sp1_00:eth4"] + - endpoints: ["pa04_lf4_00:eth2", "padc_sp2_00:eth4"] + - endpoints: ["pa01_lf1_00:eth3", "pa01_sw1_00:eth1"] + - endpoints: ["pa02_lf2_00:eth3", "pa02_sw1_00:eth1"] + - endpoints: ["pa03_lf3_00:eth3", "pa03_sw1_00:eth1"] + - endpoints: ["pa04_lf4_00:eth3", "pa04_sw1_00:eth1"] + - endpoints: ["pa01_sw1_00:eth2", "host1:eth1"] + - endpoints: ["pa02_sw1_00:eth2", "host2:eth1"] + - endpoints: ["pa03_sw1_00:eth2", "host3:eth1"] + - endpoints: ["pa04_sw1_00:eth2", "host4:eth1"] diff --git a/containerlab/lab_definition.yml b/containerlab/lab_definition.yml deleted file mode 100755 index 4d46b0b..0000000 --- a/containerlab/lab_definition.yml +++ /dev/null @@ -1,64 +0,0 @@ -name: vxlan-evpn-l2 -topology: - nodes: - spine1: - kind: ceos - image: ceos:4.33.0F - mgmt-ipv4: 172.20.20.101 - spine2: - kind: ceos - image: ceos:4.33.0F - mgmt-ipv4: 172.20.20.102 - leaf1: - kind: ceos - image: ceos:4.33.0F - mgmt-ipv4: 172.20.20.11 - leaf2: - kind: srl - image: ghcr.io/nokia/srlinux - mgmt-ipv4: 172.20.20.12 - leaf3: - kind: srl - image: ghcr.io/nokia/srlinux - mgmt-ipv4: 172.20.20.13 - leaf4: - kind: ceos - image: ceos:4.33.0F - mgmt-ipv4: 172.20.20.14 - host1: - kind: linux - image: alpine:latest - binds: - - hosts/h1_interfaces:/etc/network/interfaces - mgmt-ipv4: 172.20.20.21 - host2: - kind: linux - image: alpine:latest - binds: - - hosts/h2_interfaces:/etc/network/interfaces - mgmt-ipv4: 172.20.20.22 - host3: - kind: linux - image: alpine:latest - binds: - - hosts/h3_interfaces:/etc/network/interfaces - 
mgmt-ipv4: 172.20.20.23 - host4: - kind: linux - image: alpine:latest - binds: - - hosts/h4_interfaces:/etc/network/interfaces - mgmt-ipv4: 172.20.20.24 - links: - - endpoints: ["leaf1:eth1", "spine1:eth1"] - - endpoints: ["leaf1:eth2", "spine2:eth1"] - - endpoints: ["leaf2:e1-1", "spine1:eth2"] - - endpoints: ["leaf2:e1-2", "spine2:eth2"] - - endpoints: ["leaf3:e1-1", "spine1:eth3"] - - endpoints: ["leaf3:e1-2", "spine2:eth3"] - - endpoints: ["leaf4:eth1", "spine1:eth4"] - - endpoints: ["leaf4:eth2", "spine2:eth4"] - - endpoints: ["leaf1:eth3", "host1:eth1"] - - endpoints: ["leaf2:e1-3", "host2:eth1"] - - endpoints: ["leaf3:e1-3", "host3:eth1"] - - endpoints: ["leaf4:eth3", "host4:eth1"] diff --git a/containerlab/network_images/network-images.md b/containerlab/network_images/network-images.md new file mode 100644 index 0000000..b5ce608 --- /dev/null +++ b/containerlab/network_images/network-images.md @@ -0,0 +1,4 @@ +# Network images + +Arista cEOS image can be downlaoded at : arista.com +`cEOS64-lab-4.32.0.1F.tar.xz` diff --git a/documentation/CookBook.md b/documentation/CookBook.md new file mode 100644 index 0000000..04d0db9 --- /dev/null +++ b/documentation/CookBook.md @@ -0,0 +1,18 @@ +# CookBook + +>[!WARNING] +> +> Work in progress +> + +## Prepare data + +### Popule data in Netbox + +Generate a Netbox token via webui and execute the python script + +```bash +python import.py http://localhost:8080 YOUR_NETBOX_TOKEN device_model.yml subnets.yml +``` + +## Create Fabric diff --git a/documentation/INSTALLATION.md b/documentation/INSTALLATION.md index e482159..e5beec9 100755 --- a/documentation/INSTALLATION.md +++ b/documentation/INSTALLATION.md @@ -1,8 +1,12 @@ +# Installation Guide + ## Table of Contents 1. [Installing ContainerLab](#installing-containerlab) -2. [Installing vrnetlab](#installing-vrnetlab) -3. [Installing Docker](#installing-docker) +2. [Installing Docker](#installing-docker) +3. [Images Installation](#images-installation) +4. 
[Install Netbox and plugins](#install-netbox-and-plugins) +5. [Sources](#sources) ## Installing ContainerLab @@ -42,6 +46,12 @@ sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin # To be able to execute docker with the current user sudo usermod -aG docker $USER + +# Create management network +docker network create \ + --driver bridge \ + --subnet=172.20.20.0/24 \ + management ``` ## Images installation @@ -52,8 +62,9 @@ To download and install the arista cEOS image, you need to be registered to [ari Once you created an account, please logged in and down the cEOS docker images. To add this new image to docker, please use the docker CLI command : + ```bash -docker import cEOS64-lab-4.30.3M.tar.xz ceos:4.30.3M +docker import cEOS64-lab-4.32.0.1F.tar.xz ceos:4.32.0.1F ``` ### Nokia SR Linux @@ -63,15 +74,119 @@ docker pull ghcr.io/nokia/srlinux ``` Now you should see images available to use : + ```bash ➜ projet-vxlan-automation git:(main) ✗ docker images REPOSITORY TAG IMAGE ID CREATED SIZE -ceos 4.30.3M 63870e68ff8d 2 days ago 1.95GB +ceos 4.32.0.1F 63870e68ff8d 2 days ago 1.95GB ghcr.io/nokia/srlinux latest 801eb020ad70 11 days ago 2.59GB ``` +## Install Netbox and plugins + + For this project, we need to install specific plugin : + - [Netbox BGP](https://github.com/netbox-community/netbox-bgp) + - [Netbox Diode](https://github.com/netboxlabs/diode) + - [Netbox Topology Views](https://github.com/netbox-community/netbox-topology-views) + + ```bash + git clone -b release https://github.com/netbox-community/netbox-docker.git netbox + cd netbox + touch plugin_requirements.txt Dockerfile-Plugins docker-compose.override.yml + cat < plugin_requirements.txt + netbox_topology_views + netboxlabs-diode-netbox-plugin + netbox-napalm-plugin + EOF + ``` + + Create the Dockerfile used to build the custom Image + + ```bash + cat << EOF > Dockerfile-Plugins + FROM netboxcommunity/netbox:v4.2 + + COPY ./plugin_requirements.txt /opt/netbox/ + RUN 
/usr/local/bin/uv pip install -r /opt/netbox/plugin_requirements.txt + + COPY configuration/configuration.py /etc/netbox/config/configuration.py + COPY configuration/plugins.py /etc/netbox/config/plugins.py + RUN SECRET_KEY="dummydummydummydummydummydummydummydummydummydummy" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input + EOF + ``` + + > [!TIP] + > This `SECRET_KEY` is only used during the installation. There's no need to change it. + + Create the `docker-compose.override.yml` + + ```bash + cat < docker-compose.override.yml + services: + netbox: + image: netbox:v4.2 + pull_policy: never + ports: + - 8000:8000 + - 8080:8080 + - 8081:8081 + build: + context: . + dockerfile: Dockerfile-Plugins + networks: + - management + netbox-worker: + image: netbox:v4.2 + pull_policy: never + networks: + - management + netbox-housekeeping: + image: netbox:v4.2 + pull_policy: never + networks: + - management + postgres: + networks: + - management + redis: + networks: + - management + redis-cache: + networks: + - management + + networks: + management: + external: true + EOF + ``` + + Enable the plugin by adding configuration in `configuration/plugins.py` + + ```python + PLUGINS = [ + "netbox-topology-views" + ] + ``` + + Build and Deploy + + ```bash + docker compose build --no-cache + docker compose up -d + ``` + + Create the first admin user : + + ```bash + docker compose exec netbox /opt/netbox/netbox/manage.py createsuperuser + ``` + + You should be able to access to netbox via port `8080` + ## Sources + - [ContainerLab](https://containerlab.dev/install/) - [vrnetlab](https://containerlab.dev/manual/vrnetlab/#vrnetlab) - [BrianLinkLetter](https://www.brianlinkletter.com/2019/03/vrnetlab-emulate-networks-using-kvm-and-docker/) -- [Docker Engine for Debian](https://docs.docker.com/engine/install/debian/) \ No newline at end of file +- [Docker Engine for Debian](https://docs.docker.com/engine/install/debian/) diff --git 
a/documentation/assets/images/diagrams/VXLAN.drawio b/documentation/assets/images/diagrams/VXLAN.drawio index 3d6c1c5..db9cfa2 100755 --- a/documentation/assets/images/diagrams/VXLAN.drawio +++ b/documentation/assets/images/diagrams/VXLAN.drawio @@ -1,142 +1,344 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/netbox/Dockerfile b/netbox/Dockerfile deleted file mode 100755 index 558f764..0000000 --- a/netbox/Dockerfile +++ /dev/null @@ -1,127 +0,0 @@ -ARG FROM -FROM ${FROM} as builder - -RUN export DEBIAN_FRONTEND=noninteractive \ - && apt-get update -qq \ - && apt-get upgrade \ - --yes -qq --no-install-recommends \ - && apt-get install \ - --yes -qq --no-install-recommends \ - build-essential \ - ca-certificates \ - libldap-dev \ - libpq-dev \ - libsasl2-dev \ - libssl-dev \ - libxml2-dev \ - libxmlsec1 \ - libxmlsec1-dev \ - libxmlsec1-openssl \ - libxslt-dev \ - pkg-config \ - python3-dev \ - python3-pip \ - python3-venv \ - && python3 -m venv /opt/netbox/venv \ - && /opt/netbox/venv/bin/python3 -m pip install 
--upgrade \ - pip \ - setuptools \ - wheel - -ARG NETBOX_PATH -COPY ${NETBOX_PATH}/requirements.txt requirements-container.txt / -RUN \ - # We compile 'psycopg' in the build process - sed -i -e '/psycopg/d' /requirements.txt && \ - # Gunicorn is not needed because we use Nginx Unit - sed -i -e '/gunicorn/d' /requirements.txt && \ - # We need 'social-auth-core[all]' in the Docker image. But if we put it in our own requirements-container.txt - # we have potential version conflicts and the build will fail. - # That's why we just replace it in the original requirements.txt. - sed -i -e 's/social-auth-core\[openidconnect\]/social-auth-core\[all\]/g' /requirements.txt && \ - /opt/netbox/venv/bin/pip install \ - -r /requirements.txt \ - -r /requirements-container.txt - -### -# Main stage -### - -ARG FROM -FROM ${FROM} as main - -RUN export DEBIAN_FRONTEND=noninteractive \ - && apt-get update -qq \ - && apt-get upgrade \ - --yes -qq --no-install-recommends \ - && apt-get install \ - --yes -qq --no-install-recommends \ - bzip2 \ - ca-certificates \ - curl \ - libldap-common \ - libpq5 \ - libxmlsec1-openssl \ - openssh-client \ - openssl \ - python3 \ - python3-distutils \ - tini \ - && curl --silent --output /usr/share/keyrings/nginx-keyring.gpg \ - https://unit.nginx.org/keys/nginx-keyring.gpg \ - && echo "deb [signed-by=/usr/share/keyrings/nginx-keyring.gpg] https://packages.nginx.org/unit/ubuntu/ lunar unit" \ - > /etc/apt/sources.list.d/unit.list \ - && apt-get update -qq \ - && apt-get install \ - --yes -qq --no-install-recommends \ - unit=1.30.0-1~lunar \ - unit-python3.11=1.30.0-1~lunar \ - && rm -rf /var/lib/apt/lists/* - -COPY --from=builder /opt/netbox/venv /opt/netbox/venv - -ARG NETBOX_PATH -COPY ${NETBOX_PATH} /opt/netbox -# Copy the modified 'requirements*.txt' files, to have the files actually used during installation -COPY --from=builder /requirements.txt /requirements-container.txt /opt/netbox/ - -COPY docker/configuration.docker.py 
/opt/netbox/netbox/netbox/configuration.py -COPY docker/ldap_config.docker.py /opt/netbox/netbox/netbox/ldap_config.py -COPY docker/docker-entrypoint.sh /opt/netbox/docker-entrypoint.sh -COPY docker/housekeeping.sh /opt/netbox/housekeeping.sh -COPY docker/launch-netbox.sh /opt/netbox/launch-netbox.sh -COPY configuration/ /etc/netbox/config/ -COPY docker/nginx-unit.json /etc/unit/ - -WORKDIR /opt/netbox/netbox - -# Must set permissions for '/opt/netbox/netbox/media' directory -# to g+w so that pictures can be uploaded to netbox. -RUN mkdir -p static /opt/unit/state/ /opt/unit/tmp/ \ - && chown -R unit:root /opt/unit/ media reports scripts \ - && chmod -R g+w /opt/unit/ media reports scripts \ - && cd /opt/netbox/ && SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python -m mkdocs build \ - --config-file /opt/netbox/mkdocs.yml --site-dir /opt/netbox/netbox/project-static/docs/ \ - && SECRET_KEY="dummyKeyWithMinimumLength-------------------------" /opt/netbox/venv/bin/python /opt/netbox/netbox/manage.py collectstatic --no-input - -ENV LANG=C.utf8 PATH=/opt/netbox/venv/bin:$PATH -ENTRYPOINT [ "/usr/bin/tini", "--" ] - -CMD [ "/opt/netbox/docker-entrypoint.sh", "/opt/netbox/launch-netbox.sh" ] - -LABEL netbox.original-tag="" \ - netbox.git-branch="" \ - netbox.git-ref="" \ - netbox.git-url="" \ -# See https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys - org.opencontainers.image.created="" \ - org.opencontainers.image.title="NetBox Docker" \ - org.opencontainers.image.description="A container based distribution of NetBox, the free and open IPAM and DCIM solution." \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.authors="The netbox-docker contributors." \ - org.opencontainers.image.vendor="The netbox-docker contributors." 
\ - org.opencontainers.image.url="https://github.com/netbox-community/netbox-docker" \ - org.opencontainers.image.documentation="https://github.com/netbox-community/netbox-docker/wiki" \ - org.opencontainers.image.source="https://github.com/netbox-community/netbox-docker.git" \ - org.opencontainers.image.revision="" \ - org.opencontainers.image.version="" diff --git a/netbox/configuration/configuration.py b/netbox/configuration/configuration.py deleted file mode 100755 index d3bffb4..0000000 --- a/netbox/configuration/configuration.py +++ /dev/null @@ -1,318 +0,0 @@ -#### -## We recommend to not edit this file. -## Create separate files to overwrite the settings. -## See `extra.py` as an example. -#### - -import re -from os import environ -from os.path import abspath, dirname, join -from typing import Any, Callable, Tuple - -# For reference see https://docs.netbox.dev/en/stable/configuration/ -# Based on https://github.com/netbox-community/netbox/blob/develop/netbox/netbox/configuration_example.py - -### -# NetBox-Docker Helper functions -### - -# Read secret from file -def _read_secret(secret_name: str, default: str | None = None) -> str | None: - try: - f = open('/run/secrets/' + secret_name, 'r', encoding='utf-8') - except EnvironmentError: - return default - else: - with f: - return f.readline().strip() - -# If the `map_fn` isn't defined, then the value that is read from the environment (or the default value if not found) is returned. -# If the `map_fn` is defined, then `map_fn` is invoked and the value (that was read from the environment or the default value if not found) -# is passed to it as a parameter. The value returned from `map_fn` is then the return value of this function. -# The `map_fn` is not invoked, if the value (that was read from the environment or the default value if not found) is None. 
-def _environ_get_and_map(variable_name: str, default: str | None = None, map_fn: Callable[[str], Any | None] = None) -> Any | None: - env_value = environ.get(variable_name, default) - - if env_value == None: - return env_value - - if not map_fn: - return env_value - - return map_fn(env_value) - -_AS_BOOL = lambda value : value.lower() == 'true' -_AS_INT = lambda value : int(value) -_AS_LIST = lambda value : list(filter(None, value.split(' '))) - -_BASE_DIR = dirname(dirname(abspath(__file__))) - -######################### -# # -# Required settings # -# # -######################### - -# This is a list of valid fully-qualified domain names (FQDNs) for the NetBox server. NetBox will not permit write -# access to the server via any other hostnames. The first FQDN in the list will be treated as the preferred name. -# -# Example: ALLOWED_HOSTS = ['netbox.example.com', 'netbox.internal.local'] -ALLOWED_HOSTS = environ.get('ALLOWED_HOSTS', '*').split(' ') -# ensure that '*' or 'localhost' is always in ALLOWED_HOSTS (needed for health checks) -if '*' not in ALLOWED_HOSTS and 'localhost' not in ALLOWED_HOSTS: - ALLOWED_HOSTS.append('localhost') - -# PostgreSQL database configuration. 
See the Django documentation for a complete list of available parameters: -# https://docs.djangoproject.com/en/stable/ref/settings/#databases -DATABASE = { - 'NAME': environ.get('DB_NAME', 'netbox'), # Database name - 'USER': environ.get('DB_USER', ''), # PostgreSQL username - 'PASSWORD': _read_secret('db_password', environ.get('DB_PASSWORD', '')), - # PostgreSQL password - 'HOST': environ.get('DB_HOST', 'localhost'), # Database server - 'PORT': environ.get('DB_PORT', ''), # Database port (leave blank for default) - 'OPTIONS': {'sslmode': environ.get('DB_SSLMODE', 'prefer')}, - # Database connection SSLMODE - 'CONN_MAX_AGE': _environ_get_and_map('DB_CONN_MAX_AGE', '300', _AS_INT), - # Max database connection age - 'DISABLE_SERVER_SIDE_CURSORS': _environ_get_and_map('DB_DISABLE_SERVER_SIDE_CURSORS', 'False', _AS_BOOL), - # Disable the use of server-side cursors transaction pooling -} - -# Redis database settings. Redis is used for caching and for queuing background tasks such as webhook events. A separate -# configuration exists for each. Full connection details are required in both sections, and it is strongly recommended -# to use two separate database IDs. 
-REDIS = { - 'tasks': { - 'HOST': environ.get('REDIS_HOST', 'localhost'), - 'PORT': _environ_get_and_map('REDIS_PORT', 6379, _AS_INT), - 'USERNAME': environ.get('REDIS_USERNAME', ''), - 'PASSWORD': _read_secret('redis_password', environ.get('REDIS_PASSWORD', '')), - 'DATABASE': _environ_get_and_map('REDIS_DATABASE', 0, _AS_INT), - 'SSL': _environ_get_and_map('REDIS_SSL', 'False', _AS_BOOL), - 'INSECURE_SKIP_TLS_VERIFY': _environ_get_and_map('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False', _AS_BOOL), - }, - 'caching': { - 'HOST': environ.get('REDIS_CACHE_HOST', environ.get('REDIS_HOST', 'localhost')), - 'PORT': _environ_get_and_map('REDIS_CACHE_PORT', environ.get('REDIS_PORT', '6379'), _AS_INT), - 'USERNAME': environ.get('REDIS_CACHE_USERNAME', environ.get('REDIS_USERNAME', '')), - 'PASSWORD': _read_secret('redis_cache_password', environ.get('REDIS_CACHE_PASSWORD', environ.get('REDIS_PASSWORD', ''))), - 'DATABASE': _environ_get_and_map('REDIS_CACHE_DATABASE', '1', _AS_INT), - 'SSL': _environ_get_and_map('REDIS_CACHE_SSL', environ.get('REDIS_SSL', 'False'), _AS_BOOL), - 'INSECURE_SKIP_TLS_VERIFY': _environ_get_and_map('REDIS_CACHE_INSECURE_SKIP_TLS_VERIFY', environ.get('REDIS_INSECURE_SKIP_TLS_VERIFY', 'False'), _AS_BOOL), - }, -} - -# This key is used for secure generation of random numbers and strings. It must never be exposed outside of this file. -# For optimal security, SECRET_KEY should be at least 50 characters in length and contain a mix of letters, numbers, and -# symbols. NetBox will not run without this defined. For more information, see -# https://docs.djangoproject.com/en/stable/ref/settings/#std:setting-SECRET_KEY -SECRET_KEY = _read_secret('secret_key', environ.get('SECRET_KEY', '')) - - -######################### -# # -# Optional settings # -# # -######################### - -# # Specify one or more name and email address tuples representing NetBox administrators. 
These people will be notified of -# # application errors (assuming correct email settings are provided). -# ADMINS = [ -# # ['John Doe', 'jdoe@example.com'], -# ] - -if 'ALLOWED_URL_SCHEMES' in environ: - ALLOWED_URL_SCHEMES = _environ_get_and_map('ALLOWED_URL_SCHEMES', None, _AS_LIST) - -# Optionally display a persistent banner at the top and/or bottom of every page. HTML is allowed. To display the same -# content in both banners, define BANNER_TOP and set BANNER_BOTTOM = BANNER_TOP. -if 'BANNER_TOP' in environ: - BANNER_TOP = environ.get('BANNER_TOP', None) -if 'BANNER_BOTTOM' in environ: - BANNER_BOTTOM = environ.get('BANNER_BOTTOM', None) - -# Text to include on the login page above the login form. HTML is allowed. -if 'BANNER_LOGIN' in environ: - BANNER_LOGIN = environ.get('BANNER_LOGIN', None) - -# Maximum number of days to retain logged changes. Set to 0 to retain changes indefinitely. (Default: 90) -if 'CHANGELOG_RETENTION' in environ: - CHANGELOG_RETENTION = _environ_get_and_map('CHANGELOG_RETENTION', None, _AS_INT) - -# Maximum number of days to retain job results (scripts and reports). Set to 0 to retain job results in the database indefinitely. (Default: 90) -if 'JOB_RETENTION' in environ: - JOB_RETENTION = _environ_get_and_map('JOB_RETENTION', None, _AS_INT) -# JOBRESULT_RETENTION was renamed to JOB_RETENTION in the v3.5.0 release of NetBox. For backwards compatibility, map JOBRESULT_RETENTION to JOB_RETENTION -elif 'JOBRESULT_RETENTION' in environ: - JOB_RETENTION = _environ_get_and_map('JOBRESULT_RETENTION', None, _AS_INT) - -# API Cross-Origin Resource Sharing (CORS) settings. If CORS_ORIGIN_ALLOW_ALL is set to True, all origins will be -# allowed. Otherwise, define a list of allowed origins using either CORS_ORIGIN_WHITELIST or -# CORS_ORIGIN_REGEX_WHITELIST. 
For more information, see https://github.com/ottoyiu/django-cors-headers -CORS_ORIGIN_ALLOW_ALL = _environ_get_and_map('CORS_ORIGIN_ALLOW_ALL', 'False', _AS_BOOL) -CORS_ORIGIN_WHITELIST = _environ_get_and_map('CORS_ORIGIN_WHITELIST', 'https://localhost', _AS_LIST) -CORS_ORIGIN_REGEX_WHITELIST = [re.compile(r) for r in _environ_get_and_map('CORS_ORIGIN_REGEX_WHITELIST', '', _AS_LIST)] - -# Set to True to enable server debugging. WARNING: Debugging introduces a substantial performance penalty and may reveal -# sensitive information about your installation. Only enable debugging while performing testing. -# Never enable debugging on a production system. -DEBUG = _environ_get_and_map('DEBUG', 'False', _AS_BOOL) - -# This parameter serves as a safeguard to prevent some potentially dangerous behavior, -# such as generating new database schema migrations. -# Set this to True only if you are actively developing the NetBox code base. -DEVELOPER = _environ_get_and_map('DEVELOPER', 'False', _AS_BOOL) - -# Email settings -EMAIL = { - 'SERVER': environ.get('EMAIL_SERVER', 'localhost'), - 'PORT': _environ_get_and_map('EMAIL_PORT', 25, _AS_INT), - 'USERNAME': environ.get('EMAIL_USERNAME', ''), - 'PASSWORD': _read_secret('email_password', environ.get('EMAIL_PASSWORD', '')), - 'USE_SSL': _environ_get_and_map('EMAIL_USE_SSL', 'False', _AS_BOOL), - 'USE_TLS': _environ_get_and_map('EMAIL_USE_TLS', 'False', _AS_BOOL), - 'SSL_CERTFILE': environ.get('EMAIL_SSL_CERTFILE', ''), - 'SSL_KEYFILE': environ.get('EMAIL_SSL_KEYFILE', ''), - 'TIMEOUT': _environ_get_and_map('EMAIL_TIMEOUT', 10, _AS_INT), # seconds - 'FROM_EMAIL': environ.get('EMAIL_FROM', ''), -} - -# Enforcement of unique IP space can be toggled on a per-VRF basis. To enforce unique IP space within the global table -# (all prefixes and IP addresses not assigned to a VRF), set ENFORCE_GLOBAL_UNIQUE to True. 
-if 'ENFORCE_GLOBAL_UNIQUE' in environ: - ENFORCE_GLOBAL_UNIQUE = _environ_get_and_map('ENFORCE_GLOBAL_UNIQUE', None, _AS_BOOL) - -# Exempt certain models from the enforcement of view permissions. Models listed here will be viewable by all users and -# by anonymous users. List models in the form `.`. Add '*' to this list to exempt all models. -EXEMPT_VIEW_PERMISSIONS = _environ_get_and_map('EXEMPT_VIEW_PERMISSIONS', '', _AS_LIST) - -# HTTP proxies NetBox should use when sending outbound HTTP requests (e.g. for webhooks). -# HTTP_PROXIES = { -# 'http': 'http://10.10.1.10:3128', -# 'https': 'http://10.10.1.10:1080', -# } - -# IP addresses recognized as internal to the system. The debugging toolbar will be available only to clients accessing -# NetBox from an internal IP. -INTERNAL_IPS = _environ_get_and_map('INTERNAL_IPS', '127.0.0.1 ::1', _AS_LIST) - -# Enable GraphQL API. -if 'GRAPHQL_ENABLED' in environ: - GRAPHQL_ENABLED = _environ_get_and_map('GRAPHQL_ENABLED', None, _AS_BOOL) - -# # Enable custom logging. Please see the Django documentation for detailed guidance on configuring custom logs: -# # https://docs.djangoproject.com/en/stable/topics/logging/ -# LOGGING = {} - -# Automatically reset the lifetime of a valid session upon each authenticated request. Enables users to remain -# authenticated to NetBox indefinitely. -LOGIN_PERSISTENCE = _environ_get_and_map('LOGIN_PERSISTENCE', 'False', _AS_BOOL) - -# Setting this to True will permit only authenticated users to access any part of NetBox. By default, anonymous users -# are permitted to access most data in NetBox (excluding secrets) but not make any changes. -LOGIN_REQUIRED = _environ_get_and_map('LOGIN_REQUIRED', 'False', _AS_BOOL) - -# The length of time (in seconds) for which a user will remain logged into the web UI before being prompted to -# re-authenticate. 
(Default: 1209600 [14 days]) -LOGIN_TIMEOUT = _environ_get_and_map('LOGIN_TIMEOUT', 1209600, _AS_INT) - -# Setting this to True will display a "maintenance mode" banner at the top of every page. -if 'MAINTENANCE_MODE' in environ: - MAINTENANCE_MODE = _environ_get_and_map('MAINTENANCE_MODE', None, _AS_BOOL) - -# Maps provider -if 'MAPS_URL' in environ: - MAPS_URL = environ.get('MAPS_URL', None) - -# An API consumer can request an arbitrary number of objects =by appending the "limit" parameter to the URL (e.g. -# "?limit=1000"). This setting defines the maximum limit. Setting it to 0 or None will allow an API consumer to request -# all objects by specifying "?limit=0". -if 'MAX_PAGE_SIZE' in environ: - MAX_PAGE_SIZE = _environ_get_and_map('MAX_PAGE_SIZE', None, _AS_INT) - -# The file path where uploaded media such as image attachments are stored. A trailing slash is not needed. Note that -# the default value of this setting is derived from the installed location. -MEDIA_ROOT = environ.get('MEDIA_ROOT', join(_BASE_DIR, 'media')) - -# Expose Prometheus monitoring metrics at the HTTP endpoint '/metrics' -METRICS_ENABLED = _environ_get_and_map('METRICS_ENABLED', 'False', _AS_BOOL) - -# Determine how many objects to display per page within a list. (Default: 50) -if 'PAGINATE_COUNT' in environ: - PAGINATE_COUNT = _environ_get_and_map('PAGINATE_COUNT', None, _AS_INT) - -# # Enable installed plugins. Add the name of each plugin to the list. -# PLUGINS = [] - -# # Plugins configuration settings. These settings are used by various plugins that the user may have installed. -# # Each key in the dictionary is the name of an installed plugin and its value is a dictionary of settings. -# PLUGINS_CONFIG = { -# } - -# When determining the primary IP address for a device, IPv6 is preferred over IPv4 by default. Set this to True to -# prefer IPv4 instead. 
-if 'PREFER_IPV4' in environ: - PREFER_IPV4 = _environ_get_and_map('PREFER_IPV4', None, _AS_BOOL) - -# The default value for the amperage field when creating new power feeds. -if 'POWERFEED_DEFAULT_AMPERAGE' in environ: - POWERFEED_DEFAULT_AMPERAGE = _environ_get_and_map('POWERFEED_DEFAULT_AMPERAGE', None, _AS_INT) - -# The default value (percentage) for the max_utilization field when creating new power feeds. -if 'POWERFEED_DEFAULT_MAX_UTILIZATION' in environ: - POWERFEED_DEFAULT_MAX_UTILIZATION = _environ_get_and_map('POWERFEED_DEFAULT_MAX_UTILIZATION', None, _AS_INT) - -# The default value for the voltage field when creating new power feeds. -if 'POWERFEED_DEFAULT_VOLTAGE' in environ: - POWERFEED_DEFAULT_VOLTAGE = _environ_get_and_map('POWERFEED_DEFAULT_VOLTAGE', None, _AS_INT) - -# Rack elevation size defaults, in pixels. For best results, the ratio of width to height should be roughly 10:1. -if 'RACK_ELEVATION_DEFAULT_UNIT_HEIGHT' in environ: - RACK_ELEVATION_DEFAULT_UNIT_HEIGHT = _environ_get_and_map('RACK_ELEVATION_DEFAULT_UNIT_HEIGHT', None, _AS_INT) -if 'RACK_ELEVATION_DEFAULT_UNIT_WIDTH' in environ: - RACK_ELEVATION_DEFAULT_UNIT_WIDTH = _environ_get_and_map('RACK_ELEVATION_DEFAULT_UNIT_WIDTH', None, _AS_INT) - -# Remote authentication support -REMOTE_AUTH_ENABLED = _environ_get_and_map('REMOTE_AUTH_ENABLED', 'False', _AS_BOOL) -REMOTE_AUTH_BACKEND = _environ_get_and_map('REMOTE_AUTH_BACKEND', 'netbox.authentication.RemoteUserBackend', _AS_LIST) -REMOTE_AUTH_HEADER = environ.get('REMOTE_AUTH_HEADER', 'HTTP_REMOTE_USER') -REMOTE_AUTH_AUTO_CREATE_USER = _environ_get_and_map('REMOTE_AUTH_AUTO_CREATE_USER', 'False', _AS_BOOL) -REMOTE_AUTH_DEFAULT_GROUPS = _environ_get_and_map('REMOTE_AUTH_DEFAULT_GROUPS', '', _AS_LIST) -# REMOTE_AUTH_DEFAULT_PERMISSIONS = {} - -# This repository is used to check whether there is a new release of NetBox available. 
Set to None to disable the -# version check or use the URL below to check for release in the official NetBox repository. -RELEASE_CHECK_URL = environ.get('RELEASE_CHECK_URL', None) -# RELEASE_CHECK_URL = 'https://api.github.com/repos/netbox-community/netbox/releases' - -# Maximum execution time for background tasks, in seconds. -RQ_DEFAULT_TIMEOUT = _environ_get_and_map('RQ_DEFAULT_TIMEOUT', 300, _AS_INT) - -# The name to use for the csrf token cookie. -CSRF_COOKIE_NAME = environ.get('CSRF_COOKIE_NAME', 'csrftoken') - -# Cross-Site-Request-Forgery-Attack settings. If Netbox is sitting behind a reverse proxy, you might need to set the CSRF_TRUSTED_ORIGINS flag. -# Django 4.0 requires to specify the URL Scheme in this setting. An example environment variable could be specified like: -# CSRF_TRUSTED_ORIGINS=https://demo.netbox.dev http://demo.netbox.dev -CSRF_TRUSTED_ORIGINS = _environ_get_and_map('CSRF_TRUSTED_ORIGINS', '', _AS_LIST) - -# The name to use for the session cookie. -SESSION_COOKIE_NAME = environ.get('SESSION_COOKIE_NAME', 'sessionid') - -# By default, NetBox will store session data in the database. Alternatively, a file path can be specified here to use -# local file storage instead. (This can be useful for enabling authentication on a standby instance with read-only -# database access.) Note that the user as which NetBox runs must have read and write permissions to this path. -SESSION_FILE_PATH = environ.get('SESSION_FILE_PATH', environ.get('SESSIONS_ROOT', None)) - -# Time zone (default: UTC) -TIME_ZONE = environ.get('TIME_ZONE', 'UTC') - -# Date/time formatting. 
See the following link for supported formats: -# https://docs.djangoproject.com/en/stable/ref/templates/builtins/#date -DATE_FORMAT = environ.get('DATE_FORMAT', 'N j, Y') -SHORT_DATE_FORMAT = environ.get('SHORT_DATE_FORMAT', 'Y-m-d') -TIME_FORMAT = environ.get('TIME_FORMAT', 'g:i a') -SHORT_TIME_FORMAT = environ.get('SHORT_TIME_FORMAT', 'H:i:s') -DATETIME_FORMAT = environ.get('DATETIME_FORMAT', 'N j, Y g:i a') -SHORT_DATETIME_FORMAT = environ.get('SHORT_DATETIME_FORMAT', 'Y-m-d H:i') diff --git a/netbox/configuration/extra.py b/netbox/configuration/extra.py deleted file mode 100755 index 8bd1337..0000000 --- a/netbox/configuration/extra.py +++ /dev/null @@ -1,49 +0,0 @@ -#### -## This file contains extra configuration options that can't be configured -## directly through environment variables. -#### - -## Specify one or more name and email address tuples representing NetBox administrators. These people will be notified of -## application errors (assuming correct email settings are provided). -# ADMINS = [ -# # ['John Doe', 'jdoe@example.com'], -# ] - - -## URL schemes that are allowed within links in NetBox -# ALLOWED_URL_SCHEMES = ( -# 'file', 'ftp', 'ftps', 'http', 'https', 'irc', 'mailto', 'sftp', 'ssh', 'tel', 'telnet', 'tftp', 'vnc', 'xmpp', -# ) - -## Enable installed plugins. Add the name of each plugin to the list. -# from netbox.configuration.configuration import PLUGINS -# PLUGINS.append('my_plugin') - -## Plugins configuration settings. These settings are used by various plugins that the user may have installed. -## Each key in the dictionary is the name of an installed plugin and its value is a dictionary of settings. -# from netbox.configuration.configuration import PLUGINS_CONFIG -# PLUGINS_CONFIG['my_plugin'] = { -# 'foo': 'bar', -# 'buzz': 'bazz' -# } - - -## Remote authentication support -# REMOTE_AUTH_DEFAULT_PERMISSIONS = {} - - -## By default uploaded media is stored on the local filesystem. Using Django-storages is also supported. 
Provide the -## class path of the storage driver in STORAGE_BACKEND and any configuration options in STORAGE_CONFIG. For example: -# STORAGE_BACKEND = 'storages.backends.s3boto3.S3Boto3Storage' -# STORAGE_CONFIG = { -# 'AWS_ACCESS_KEY_ID': 'Key ID', -# 'AWS_SECRET_ACCESS_KEY': 'Secret', -# 'AWS_STORAGE_BUCKET_NAME': 'netbox', -# 'AWS_S3_REGION_NAME': 'eu-west-1', -# } - - -## This file can contain arbitrary Python code, e.g.: -# from datetime import datetime -# now = datetime.now().strftime("%d/%m/%Y %H:%M:%S") -# BANNER_TOP = f'This instance started on {now}.' diff --git a/netbox/configuration/ldap/extra.py b/netbox/configuration/ldap/extra.py deleted file mode 100755 index 4505197..0000000 --- a/netbox/configuration/ldap/extra.py +++ /dev/null @@ -1,28 +0,0 @@ -#### -## This file contains extra configuration options that can't be configured -## directly through environment variables. -## All vairables set here overwrite any existing found in ldap_config.py -#### - -# # This Python script inherits all the imports from ldap_config.py -# from django_auth_ldap.config import LDAPGroupQuery # Imported since not in ldap_config.py - -# # Sets a base requirement of membetship to netbox-user-ro, netbox-user-rw, or netbox-user-admin. -# AUTH_LDAP_REQUIRE_GROUP = ( -# LDAPGroupQuery("cn=netbox-user-ro,ou=groups,dc=example,dc=com") -# | LDAPGroupQuery("cn=netbox-user-rw,ou=groups,dc=example,dc=com") -# | LDAPGroupQuery("cn=netbox-user-admin,ou=groups,dc=example,dc=com") -# ) - -# # Sets LDAP Flag groups variables with example. 
-# AUTH_LDAP_USER_FLAGS_BY_GROUP = { -# "is_staff": ( -# LDAPGroupQuery("cn=netbox-user-ro,ou=groups,dc=example,dc=com") -# | LDAPGroupQuery("cn=netbox-user-rw,ou=groups,dc=example,dc=com") -# | LDAPGroupQuery("cn=netbox-user-admin,ou=groups,dc=example,dc=com") -# ), -# "is_superuser": "cn=netbox-user-admin,ou=groups,dc=example,dc=com", -# } - -# # Sets LDAP Mirror groups variables with example groups -# AUTH_LDAP_MIRROR_GROUPS = ["netbox-user-ro", "netbox-user-rw", "netbox-user-admin"] diff --git a/netbox/configuration/ldap/ldap_config.py b/netbox/configuration/ldap/ldap_config.py deleted file mode 100755 index 82fad72..0000000 --- a/netbox/configuration/ldap/ldap_config.py +++ /dev/null @@ -1,111 +0,0 @@ -from importlib import import_module -from os import environ - -import ldap -from django_auth_ldap.config import LDAPSearch - - -# Read secret from file -def _read_secret(secret_name, default=None): - try: - f = open('/run/secrets/' + secret_name, 'r', encoding='utf-8') - except EnvironmentError: - return default - else: - with f: - return f.readline().strip() - -# Import and return the group type based on string name -def _import_group_type(group_type_name): - mod = import_module('django_auth_ldap.config') - try: - return getattr(mod, group_type_name)() - except: - return None - -# Server URI -AUTH_LDAP_SERVER_URI = environ.get('AUTH_LDAP_SERVER_URI', '') - -# The following may be needed if you are binding to Active Directory. -AUTH_LDAP_CONNECTION_OPTIONS = { - ldap.OPT_REFERRALS: 0 -} - -AUTH_LDAP_BIND_AS_AUTHENTICATING_USER = environ.get('AUTH_LDAP_BIND_AS_AUTHENTICATING_USER', 'False').lower() == 'true' - -# Set the DN and password for the NetBox service account if needed. 
-if not AUTH_LDAP_BIND_AS_AUTHENTICATING_USER: - AUTH_LDAP_BIND_DN = environ.get('AUTH_LDAP_BIND_DN', '') - AUTH_LDAP_BIND_PASSWORD = _read_secret('auth_ldap_bind_password', environ.get('AUTH_LDAP_BIND_PASSWORD', '')) - -# Set a string template that describes any user’s distinguished name based on the username. -AUTH_LDAP_USER_DN_TEMPLATE = environ.get('AUTH_LDAP_USER_DN_TEMPLATE', None) - -# Enable STARTTLS for ldap authentication. -AUTH_LDAP_START_TLS = environ.get('AUTH_LDAP_START_TLS', 'False').lower() == 'true' - -# Include this setting if you want to ignore certificate errors. This might be needed to accept a self-signed cert. -# Note that this is a NetBox-specific setting which sets: -# ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER) -LDAP_IGNORE_CERT_ERRORS = environ.get('LDAP_IGNORE_CERT_ERRORS', 'False').lower() == 'true' - -# Include this setting if you want to validate the LDAP server certificates against a CA certificate directory on your server -# Note that this is a NetBox-specific setting which sets: -# ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, LDAP_CA_CERT_DIR) -LDAP_CA_CERT_DIR = environ.get('LDAP_CA_CERT_DIR', None) - -# Include this setting if you want to validate the LDAP server certificates against your own CA. -# Note that this is a NetBox-specific setting which sets: -# ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, LDAP_CA_CERT_FILE) -LDAP_CA_CERT_FILE = environ.get('LDAP_CA_CERT_FILE', None) - -AUTH_LDAP_USER_SEARCH_BASEDN = environ.get('AUTH_LDAP_USER_SEARCH_BASEDN', '') -AUTH_LDAP_USER_SEARCH_ATTR = environ.get('AUTH_LDAP_USER_SEARCH_ATTR', 'sAMAccountName') -AUTH_LDAP_USER_SEARCH_FILTER: str = environ.get( - 'AUTH_LDAP_USER_SEARCH_FILTER', f'({AUTH_LDAP_USER_SEARCH_ATTR}=%(user)s)' -) - -AUTH_LDAP_USER_SEARCH = LDAPSearch( - AUTH_LDAP_USER_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_USER_SEARCH_FILTER -) - -# This search ought to return all groups to which the user belongs. 
django_auth_ldap uses this to determine group -# heirarchy. - -AUTH_LDAP_GROUP_SEARCH_BASEDN = environ.get('AUTH_LDAP_GROUP_SEARCH_BASEDN', '') -AUTH_LDAP_GROUP_SEARCH_CLASS = environ.get('AUTH_LDAP_GROUP_SEARCH_CLASS', 'group') - -AUTH_LDAP_GROUP_SEARCH_FILTER: str = environ.get( - 'AUTH_LDAP_GROUP_SEARCH_FILTER', f'(objectclass={AUTH_LDAP_GROUP_SEARCH_CLASS})' -) -AUTH_LDAP_GROUP_SEARCH = LDAPSearch( - AUTH_LDAP_GROUP_SEARCH_BASEDN, ldap.SCOPE_SUBTREE, AUTH_LDAP_GROUP_SEARCH_FILTER -) -AUTH_LDAP_GROUP_TYPE = _import_group_type(environ.get('AUTH_LDAP_GROUP_TYPE', 'GroupOfNamesType')) - -# Define a group required to login. -AUTH_LDAP_REQUIRE_GROUP = environ.get('AUTH_LDAP_REQUIRE_GROUP_DN') - -# Define special user types using groups. Exercise great caution when assigning superuser status. -AUTH_LDAP_USER_FLAGS_BY_GROUP = {} - -if AUTH_LDAP_REQUIRE_GROUP is not None: - AUTH_LDAP_USER_FLAGS_BY_GROUP = { - "is_active": environ.get('AUTH_LDAP_REQUIRE_GROUP_DN', ''), - "is_staff": environ.get('AUTH_LDAP_IS_ADMIN_DN', ''), - "is_superuser": environ.get('AUTH_LDAP_IS_SUPERUSER_DN', '') - } - -# For more granular permissions, we can map LDAP groups to Django groups. -AUTH_LDAP_FIND_GROUP_PERMS = environ.get('AUTH_LDAP_FIND_GROUP_PERMS', 'True').lower() == 'true' -AUTH_LDAP_MIRROR_GROUPS = environ.get('AUTH_LDAP_MIRROR_GROUPS', '').lower() == 'true' - -# Cache groups for one hour to reduce LDAP traffic -AUTH_LDAP_CACHE_TIMEOUT = int(environ.get('AUTH_LDAP_CACHE_TIMEOUT', 3600)) - -# Populate the Django user from the LDAP directory. 
-AUTH_LDAP_USER_ATTR_MAP = { - "first_name": environ.get('AUTH_LDAP_ATTR_FIRSTNAME', 'givenName'), - "last_name": environ.get('AUTH_LDAP_ATTR_LASTNAME', 'sn'), - "email": environ.get('AUTH_LDAP_ATTR_MAIL', 'mail') -} diff --git a/netbox/configuration/logging.py b/netbox/configuration/logging.py deleted file mode 100755 index d786768..0000000 --- a/netbox/configuration/logging.py +++ /dev/null @@ -1,55 +0,0 @@ -# # Remove first comment(#) on each line to implement this working logging example. -# # Add LOGLEVEL environment variable to netbox if you use this example & want a different log level. -# from os import environ - -# # Set LOGLEVEL in netbox.env or docker-compose.overide.yml to override a logging level of INFO. -# LOGLEVEL = environ.get('LOGLEVEL', 'INFO') - -# LOGGING = { - -# 'version': 1, -# 'disable_existing_loggers': False, -# 'formatters': { -# 'verbose': { -# 'format': '{levelname} {asctime} {module} {process:d} {thread:d} {message}', -# 'style': '{', -# }, -# 'simple': { -# 'format': '{levelname} {message}', -# 'style': '{', -# }, -# }, -# 'filters': { -# 'require_debug_false': { -# '()': 'django.utils.log.RequireDebugFalse', -# }, -# }, -# 'handlers': { -# 'console': { -# 'level': LOGLEVEL, -# 'filters': ['require_debug_false'], -# 'class': 'logging.StreamHandler', -# 'formatter': 'simple' -# }, -# 'mail_admins': { -# 'level': 'ERROR', -# 'class': 'django.utils.log.AdminEmailHandler', -# 'filters': ['require_debug_false'] -# } -# }, -# 'loggers': { -# 'django': { -# 'handlers': ['console'], -# 'propagate': True, -# }, -# 'django.request': { -# 'handlers': ['mail_admins'], -# 'level': 'ERROR', -# 'propagate': False, -# }, -# 'django_auth_ldap': { -# 'handlers': ['console',], -# 'level': LOGLEVEL, -# } -# } -# } diff --git a/netbox/configuration/plugins.py b/netbox/configuration/plugins.py deleted file mode 100755 index c0b1a1f..0000000 --- a/netbox/configuration/plugins.py +++ /dev/null @@ -1,13 +0,0 @@ -# Add your plugins and plugin settings here. 
-# Of course uncomment this file out. - -# To learn how to build images with your required plugins -# See https://github.com/netbox-community/netbox-docker/wiki/Using-Netbox-Plugins - -# PLUGINS = ["netbox_bgp"] - -# PLUGINS_CONFIG = { -# "netbox_bgp": { -# ADD YOUR SETTINGS HERE -# } -# } diff --git a/netbox/docker-compose.override.yml b/netbox/docker-compose.override.yml deleted file mode 100755 index f08d6c0..0000000 --- a/netbox/docker-compose.override.yml +++ /dev/null @@ -1,5 +0,0 @@ -version: '3.4' -services: - netbox: - ports: - - 8000:8080 diff --git a/netbox/docker-compose.yml b/netbox/docker-compose.yml deleted file mode 100755 index 2b8482c..0000000 --- a/netbox/docker-compose.yml +++ /dev/null @@ -1,90 +0,0 @@ -version: '3.4' -services: - netbox: &netbox - image: docker.io/netboxcommunity/netbox:${VERSION-v3.6-2.7.0} - depends_on: - - postgres - - redis - - redis-cache - env_file: env/netbox.env - healthcheck: - start_period: 60s - timeout: 3s - interval: 15s - test: "curl -f http://localhost:8080/api/ || exit 1" - volumes: - - ./configuration:/etc/netbox/config:z,ro - - netbox-media-files:/opt/netbox/netbox/media:rw - - netbox-reports-files:/opt/netbox/netbox/reports:rw - - netbox-scripts-files:/opt/netbox/netbox/scripts:rw - platform: linux/amd64 - netbox-worker: - <<: *netbox - depends_on: - netbox: - condition: service_healthy - command: - - /opt/netbox/venv/bin/python - - /opt/netbox/netbox/manage.py - - rqworker - healthcheck: - start_period: 20s - timeout: 3s - interval: 15s - test: "ps -aux | grep -v grep | grep -q rqworker || exit 1" - netbox-housekeeping: - <<: *netbox - depends_on: - netbox: - condition: service_healthy - command: - - /opt/netbox/housekeeping.sh - healthcheck: - start_period: 20s - timeout: 3s - interval: 15s - test: "ps -aux | grep -v grep | grep -q housekeeping || exit 1" - - # postgres - postgres: - image: docker.io/postgres:15-alpine - env_file: env/postgres.env - volumes: - - 
netbox-postgres-data:/var/lib/postgresql/data - platform: linux/amd64 - - # redis - redis: - image: docker.io/redis:7-alpine - command: - - sh - - -c # this is to evaluate the $REDIS_PASSWORD from the env - - redis-server --appendonly yes --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose - env_file: env/redis.env - volumes: - - netbox-redis-data:/data - platform: linux/amd64 - redis-cache: - image: docker.io/redis:7-alpine - command: - - sh - - -c # this is to evaluate the $REDIS_PASSWORD from the env - - redis-server --requirepass $$REDIS_PASSWORD ## $$ because of docker-compose - env_file: env/redis-cache.env - volumes: - - netbox-redis-cache-data:/data - platform: linux/amd64 - -volumes: - netbox-media-files: - driver: local - netbox-postgres-data: - driver: local - netbox-redis-cache-data: - driver: local - netbox-redis-data: - driver: local - netbox-reports-files: - driver: local - netbox-scripts-files: - driver: local diff --git a/netbox/docker/configuration.docker.py b/netbox/docker/configuration.docker.py deleted file mode 100755 index 413f802..0000000 --- a/netbox/docker/configuration.docker.py +++ /dev/null @@ -1,91 +0,0 @@ -## Generic Parts -# These functions are providing the functionality to load -# arbitrary configuration files. -# -# They can be imported by other code (see `ldap_config.py` for an example). 
- -import importlib.util -import sys -from os import scandir -from os.path import abspath, isfile - - -def _filename(f): - return f.name - - -def _import(module_name, path, loaded_configurations): - spec = importlib.util.spec_from_file_location("", path) - module = importlib.util.module_from_spec(spec) - spec.loader.exec_module(module) - sys.modules[module_name] = module - - loaded_configurations.insert(0, module) - - print(f"🧬 loaded config '{path}'") - - -def read_configurations(config_module, config_dir, main_config): - loaded_configurations = [] - - main_config_path = abspath(f"{config_dir}/{main_config}.py") - if isfile(main_config_path): - _import(f"{config_module}.{main_config}", main_config_path, loaded_configurations) - else: - print(f"⚠️ Main configuration '{main_config_path}' not found.") - - with scandir(config_dir) as it: - for f in sorted(it, key=_filename): - if not f.is_file(): - continue - - if f.name.startswith("__"): - continue - - if not f.name.endswith(".py"): - continue - - if f.name == f"{main_config}.py": - continue - - if f.name == f"{config_dir}.py": - continue - - module_name = f"{config_module}.{f.name[:-len('.py')]}".replace(".", "_") - _import(module_name, f.path, loaded_configurations) - - if len(loaded_configurations) == 0: - print(f"‼️ No configuration files found in '{config_dir}'.") - raise ImportError(f"No configuration files found in '{config_dir}'.") - - return loaded_configurations - - -## Specific Parts -# This section's code actually loads the various configuration files -# into the module with the given name. -# It contains the logic to resolve arbitrary configuration options by -# levaraging dynamic programming using `__getattr__`. 
- - -_loaded_configurations = read_configurations( - config_dir="/etc/netbox/config/", - config_module="netbox.configuration", - main_config="configuration", -) - - -def __getattr__(name): - for config in _loaded_configurations: - try: - return getattr(config, name) - except: - pass - raise AttributeError - - -def __dir__(): - names = [] - for config in _loaded_configurations: - names.extend(config.__dir__()) - return names diff --git a/netbox/docker/docker-entrypoint.sh b/netbox/docker/docker-entrypoint.sh deleted file mode 100755 index 9b39689..0000000 --- a/netbox/docker/docker-entrypoint.sh +++ /dev/null @@ -1,99 +0,0 @@ -#!/bin/bash -# Runs on every start of the NetBox Docker container - -# Stop when an error occures -set -e - -# Allows NetBox to be run as non-root users -umask 002 - -# Load correct Python3 env -# shellcheck disable=SC1091 -source /opt/netbox/venv/bin/activate - -# Try to connect to the DB -DB_WAIT_TIMEOUT=${DB_WAIT_TIMEOUT-3} -MAX_DB_WAIT_TIME=${MAX_DB_WAIT_TIME-30} -CUR_DB_WAIT_TIME=0 -while [ "${CUR_DB_WAIT_TIME}" -lt "${MAX_DB_WAIT_TIME}" ]; do - # Read and truncate connection error tracebacks to last line by default - exec {psfd}< <(./manage.py showmigrations 2>&1) - read -rd '' DB_ERR <&$psfd || : - exec {psfd}<&- - wait $! && break - if [ -n "$DB_WAIT_DEBUG" ]; then - echo "$DB_ERR" - else - readarray -tn 0 DB_ERR_LINES <<<"$DB_ERR" - echo "${DB_ERR_LINES[@]: -1}" - echo "[ Use DB_WAIT_DEBUG=1 in netbox.env to print full traceback for errors here ]" - fi - echo "⏳ Waiting on DB... (${CUR_DB_WAIT_TIME}s / ${MAX_DB_WAIT_TIME}s)" - sleep "${DB_WAIT_TIMEOUT}" - CUR_DB_WAIT_TIME=$((CUR_DB_WAIT_TIME + DB_WAIT_TIMEOUT)) -done -if [ "${CUR_DB_WAIT_TIME}" -ge "${MAX_DB_WAIT_TIME}" ]; then - echo "❌ Waited ${MAX_DB_WAIT_TIME}s or more for the DB to become ready." - exit 1 -fi -# Check if update is needed -if ! 
./manage.py migrate --check >/dev/null 2>&1; then - echo "⚙️ Applying database migrations" - ./manage.py migrate --no-input - echo "⚙️ Running trace_paths" - ./manage.py trace_paths --no-input - echo "⚙️ Removing stale content types" - ./manage.py remove_stale_contenttypes --no-input - echo "⚙️ Removing expired user sessions" - ./manage.py clearsessions - echo "⚙️ Building search index (lazy)" - ./manage.py reindex --lazy -fi - -# Create Superuser if required -if [ "$SKIP_SUPERUSER" == "true" ]; then - echo "↩️ Skip creating the superuser" -else - if [ -z ${SUPERUSER_NAME+x} ]; then - SUPERUSER_NAME='admin' - fi - if [ -z ${SUPERUSER_EMAIL+x} ]; then - SUPERUSER_EMAIL='admin@example.com' - fi - if [ -f "/run/secrets/superuser_password" ]; then - SUPERUSER_PASSWORD="$(=7.4.1", + "pyyaml>=6.0.2", + "requests>=2.32.3", +] + +[tool.setuptools] +py-modules = [] + +[tool.ruff] +# Exclude a variety of commonly ignored directories. +exclude = [ + ".bzr", + ".direnv", + ".eggs", + ".git", + ".git-rewrite", + ".hg", + ".ipynb_checkpoints", + ".mypy_cache", + ".nox", + ".pants.d", + ".pyenv", + ".pytest_cache", + ".pytype", + ".ruff_cache", + ".svn", + ".tox", + ".venv", + ".vscode", + "__pypackages__", + "_build", + "buck-out", + "build", + "dist", + "node_modules", + "site-packages", + "venv", +] + +# Same as Black. +line-length = 88 +indent-width = 4 + +# Assume Python 3.8 +target-version = "py38" + +[tool.ruff.lint] +# Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default. +# Unlike Flake8, Ruff doesn't enable pycodestyle warnings (`W`) or +# McCabe complexity (`C901`) by default. +select = ["E4", "E7", "E9", "F"] +ignore = [] + +# Allow fix for all enabled rules (when `--fix`) is provided. +fixable = ["ALL"] +unfixable = [] + +# Allow unused variables when underscore-prefixed. +dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$" + +[tool.ruff.format] +# Like Black, use double quotes for strings. 
+quote-style = "double" + +# Like Black, indent with spaces, rather than tabs. +indent-style = "space" + +# Like Black, respect magic trailing commas. +skip-magic-trailing-comma = false + +# Like Black, automatically detect the appropriate line ending. +line-ending = "auto" + +# Enable auto-formatting of code examples in docstrings. Markdown, +# reStructuredText code/literal blocks and doctests are all supported. +# +# This is currently disabled by default, but it is planned for this +# to be opt-out in the future. +docstring-code-format = false + +# Set the line length limit used when formatting code snippets in +# docstrings. +# +# This only has an effect when the `docstring-code-format` setting is +# enabled. +docstring-code-line-length = "dynamic" + +[dependency-groups] +lint = [ + "ruff>=0.9.4", +] +dev = [ + "ipython>=8.32.0", +] diff --git a/templates/leaves.j2 b/templates/leaves.j2 new file mode 100644 index 0000000..b018eaf --- /dev/null +++ b/templates/leaves.j2 @@ -0,0 +1,44 @@ +{# Interface Configuration #} +{% for interface in device.interfaces.all() %} +interface {{ interface.name }} +{%- if interface.description %} +description {{ interface.description }} +{%- endif %} +no shutdown +no switchport +ip address {{ ipam.IPAddress.objects.get(assigned_object_id=interface.id).address }} +mtu 9214 +! 
+{% endfor %} + +{# BGP Configuration #} +{% set loopback_interface = device.interfaces.get(name='Loopback0') %} +{% set router_id = ipam.IPAddress.objects.get(assigned_object_id=loopback_interface.id).address %} +router bgp {{ device.custom_field_data.ASN }} +router-id {{ router_id }} +maximum-paths 4 ecmp 4 +neighbor SPINE_GROUP peer group +neighbor SPINE_GROUP allowas-in 1 +neighbor SPINE_GROUP ebgp-multihop 4 +neighbor SPINE_GROUP send-community extended +neighbor SPINE_GROUP maximum-routes 12000 +{%- for interface in device.interfaces.all() %} + {%- if interface.connected_endpoints and interface.name != 'Ethernet3' %} + {%- for remote_interface in interface.connected_endpoints %} + {%- set remote_ip = ipam.IPAddress.objects.get(assigned_object_id=remote_interface.id) %} +neighbor {{ remote_ip.address }} peer group SPINE_GROUP +neighbor {{ remote_ip.address }} remote-as {{ remote_interface.device.custom_field_data.ASN }} + {%- endfor %} + {%- endif %} +{%- endfor %} +! +address-family ipv4 +{%- for interface in device.interfaces.all() %} + {%- if interface.connected_endpoints and interface.name != 'Ethernet3' %} + {%- for remote_interface in interface.connected_endpoints %} + {%- set remote_ip = ipam.IPAddress.objects.get(assigned_object_id=remote_interface.id) %} +neighbor {{ remote_ip.address }} activate + {%- endfor %} + {%- endif %} +{%- endfor %} +! \ No newline at end of file diff --git a/templates/spines.j2 b/templates/spines.j2 new file mode 100644 index 0000000..7ed437b --- /dev/null +++ b/templates/spines.j2 @@ -0,0 +1,44 @@ +{# Interface Configuration #} +{% for interface in device.interfaces.all() %} +interface {{ interface.name }} +{%- if interface.description %} +description {{ interface.description }} +{%- endif %} +no shutdown +no switchport +ip address {{ ipam.IPAddress.objects.get(assigned_object_id=interface.id).address }} +mtu 9214 +! 
+{% endfor %} + +{# BGP Configuration #} +{% set loopback_interface = device.interfaces.get(name='Loopback0') %} +{% set router_id = ipam.IPAddress.objects.get(assigned_object_id=loopback_interface.id).address %} +router bgp {{ device.custom_field_data.ASN }} +router-id {{ router_id }} +maximum-paths 4 ecmp 4 +neighbor LEAF_GROUP peer group +neighbor LEAF_GROUP allowas-in 1 +neighbor LEAF_GROUP ebgp-multihop 4 +neighbor LEAF_GROUP send-community extended +neighbor LEAF_GROUP maximum-routes 12000 +{%- for interface in device.interfaces.all() %} + {%- if interface.connected_endpoints %} + {%- for remote_interface in interface.connected_endpoints %} + {%- set remote_ip = ipam.IPAddress.objects.get(assigned_object_id=remote_interface.id) %} +neighbor {{ remote_ip.address }} peer group LEAF_GROUP +neighbor {{ remote_ip.address }} remote-as {{ remote_interface.device.custom_field_data.ASN }} + {%- endfor %} + {%- endif %} +{%- endfor %} +! +address-family ipv4 +{%- for interface in device.interfaces.all() %} + {%- if interface.connected_endpoints %} + {%- for remote_interface in interface.connected_endpoints %} + {%- set remote_ip = ipam.IPAddress.objects.get(assigned_object_id=remote_interface.id) %} +neighbor {{ remote_ip.address }} activate + {%- endfor %} + {%- endif %} +{%- endfor %} +! 
"""Interactive helper: onboard a customer onto the VXLAN fabric.

Creates the tenant, allocates a /24 from the customers container, creates a
VLAN and an L2VPN (VXLAN-EVPN) bound together by a termination, then assigns
one customer IP on each leaf's Ethernet3 in the selected locations.
"""

import sys

from helpers.netbox_backend import NetBoxBackend

# --- NetBox connection details ---
url = input("Enter NetBox URL: ")
token = input("Enter NetBox API Token: ")
nb_backend = NetBoxBackend(url, token)

# --- Customer details ---
customer_name = input("Enter Customer Name: ")
vlan_id = int(input("Enter VLAN ID: "))
vni_id = int(input("Enter VNI ID: "))

# --- Location selection ---
locations = list(nb_backend.nb.dcim.locations.all())
for idx, loc in enumerate(locations):
    print(f"{idx}: {loc.name}")
selected_indices = input("Select one or multiple locations by index (comma-separated): ")
# BUG FIX: strip whitespace so "0, 2" selects the same locations as "0,2".
chosen = {part.strip() for part in selected_indices.split(",")}
selected_locations = [loc for i, loc in enumerate(locations) if str(i) in chosen]
if not selected_locations:
    print("[ERROR] No valid location selected.")
    sys.exit(1)

# --- Create tenant ---
customer_slug = customer_name.lower().replace(" ", "-")
tenant = nb_backend.create_tenant(customer_name, customer_slug)
if not tenant:
    # create_tenant swallows API errors and returns None; don't crash on tenant.id.
    print("[ERROR] Tenant creation failed.")
    sys.exit(1)

# Attach the selected locations to the tenant (best effort: report and continue).
for location in selected_locations:
    try:
        location.tenant = tenant.id
        location.save()
    except Exception as e:
        print(f"[ERROR] Failed to update location {location.name} with tenant: {e}")

# --- Allocate a /24 for the customer from the customers container ---
customers_role = nb_backend.nb.ipam.roles.get(slug="customerscontainer")
if not customers_role:
    # BUG FIX: previously .id was dereferenced without checking for None.
    print("[ERROR] IPAM role 'customerscontainer' not found.")
    sys.exit(1)
parent_prefixes = list(nb_backend.nb.ipam.prefixes.filter(role_id=customers_role.id))
if not parent_prefixes:
    print("[ERROR] No available parent prefix found.")
    sys.exit(1)

customer_prefix = nb_backend.allocate_prefix(parent_prefixes[0], 24, None, None)
if not customer_prefix:
    print("[ERROR] Could not allocate /24 for customer.")
    sys.exit(1)

# --- L2VPN (VXLAN-EVPN) + VLAN ---
l2vpn = nb_backend.create_l2vpn(vni_id, f"{customer_name}_vpn", f"{customer_slug}-vpn", tenant.id)
vlan = nb_backend.create_vlan(vlan_id, f"{customer_name}_vlan", f"{customer_slug}-vlan", tenant.id)
if not l2vpn or not vlan:
    # BUG FIX: both helpers return None on failure; guard before using .id below.
    print("[ERROR] Failed to create L2VPN/VLAN.")
    sys.exit(1)

# Bind the VLAN to the L2VPN via a VXLAN termination.
nb_backend.create_vxlan_termination(l2vpn.id, "ipam.vlan", vlan.id)

# --- Assign one customer IP per leaf (interface Ethernet3) in each location ---
for location in selected_locations:
    # BUG FIX: materialize the pynetbox RecordSet so len()/zip() behave predictably.
    leaf_devices = list(nb_backend.nb.dcim.devices.filter(role="leaf", location_id=location.id))
    if not leaf_devices:
        print(f"[ERROR] No leaf devices found in location {location.name}.")
        continue
    # Re-fetch per location: earlier assignments consume addresses from the /24.
    ip_list = nb_backend.get_available_ips_in_prefix(customer_prefix)
    if len(ip_list) < len(leaf_devices):
        print("[ERROR] Not enough IP addresses available in the allocated /24.")
        sys.exit(1)
    for device, ip in zip(leaf_devices, ip_list):
        interface = nb_backend.get_or_create_interface(device.id, "Ethernet3")
        nb_backend.assign_ip_to_interface(interface, ip.address)
def main():
    """Interactively build a VXLAN fabric in NetBox.

    Steps: pick/create a site, create 2 spines plus one leaf and one access
    switch per building, cable them, allocate /31s for the fabric links,
    assign per-device Loopback0 /32s, and set the 'ASN' custom field.
    """
    print("=== VXLAN Fabric Creation Script (via NetBoxBackend) ===")

    # 1) NetBox connection details
    netbox_url = input("NetBox URL (e.g. https://netbox.local): ").strip()
    if not netbox_url:
        print("ERROR: NetBox URL is required.")
        sys.exit(1)

    netbox_token = getpass.getpass("NetBox API Token: ")
    if not netbox_token:
        print("ERROR: NetBox API token is required.")
        sys.exit(1)

    # 2) Init the NetBox backend wrapper
    try:
        nb = NetBoxBackend(netbox_url, netbox_token, verify_ssl=True)
    except Exception as exc:
        print(f"ERROR: Failed to connect to NetBox: {exc}")
        sys.exit(1)

    # 3) Choose or create the site
    existing_sites = nb.get_sites()
    if not existing_sites:
        print("No sites found in NetBox.")
        sys.exit(1)

    print("\nExisting Sites:")
    for idx, s in enumerate(existing_sites, start=1):
        print(f"  {idx}. {s.name} (slug={s.slug})")

    choice = input("Choose a site by number, or type 'new' to create one: ").strip().lower()
    if choice == "new":
        site_name = input("New site name (e.g. 'Paris'): ").strip()
        site_code_input = input("New site code (e.g. 'PA'): ").strip()
        if not site_name or not site_code_input:
            print("ERROR: Site name and code required.")
            sys.exit(1)
        try:
            site = nb.create_site(site_name, site_code_input.lower())
        except Exception as exc:
            print(f"ERROR: Failed to create site: {exc}")
            sys.exit(1)
        if not site:
            # create_site reports API errors itself and returns None.
            print("ERROR: Failed to create site.")
            sys.exit(1)
        print(f"Created new site: {site.name} ({site.slug})")
        # BUG FIX: honour the code the operator just typed; previously it was
        # silently ignored and recomputed from the first two letters of the name.
        site_code = site_code_input[:2].upper()
    else:
        try:
            site_index = int(choice)
            # BUG FIX: reject indexes outside the menu; a negative index used
            # to wrap around and silently pick a site from the end of the list.
            if not 1 <= site_index <= len(existing_sites):
                raise IndexError(site_index)
            site = existing_sites[site_index - 1]
        except (ValueError, IndexError):
            print("ERROR: Invalid site selection.")
            sys.exit(1)
        site_clean = site.name.strip()
        site_code = site_clean[:2].upper() if len(site_clean) >= 2 else site_clean.upper()

    # 4) Number of buildings (1-5)
    while True:
        try:
            num_buildings = int(input("How many buildings? (1–5): ").strip())
            if 1 <= num_buildings <= 5:
                break
            print("ERROR: Please choose between 1 and 5.")
        except ValueError:
            print("ERROR: Invalid input. Try again.")

    # 5) Device type slugs (must already exist in NetBox)
    print("\nEnter device type slugs (must exist in NetBox).")
    spine_devtype_slug = input("Spine Device Type Slug: ").strip()
    leaf_devtype_slug = input("Leaf Device Type Slug: ").strip()
    access_devtype_slug = input("Access Switch Device Type Slug: ").strip()

    # 6) Device roles
    spine_role = nb.get_device_role("spine")
    if not spine_role:
        print("ERROR: No device role with slug='spine'.")
        sys.exit(1)
    leaf_role = nb.get_device_role("leaf")
    if not leaf_role:
        print("ERROR: No device role with slug='leaf'.")
        sys.exit(1)
    access_role = nb.get_device_role("access")
    if not access_role:
        print("ERROR: No device role with slug='access'.")
        sys.exit(1)
    print(f"Using roles -> Spine={spine_role.id}, Leaf={leaf_role.id}, Access={access_role.id}")

    # 7) Create / retrieve the two spines
    spine_names = [f"{site_code.lower()}dc_sp1_00", f"{site_code.lower()}dc_sp2_00"]
    spines = []
    for name in spine_names:
        try:
            new_spine = nb.create_device(
                name=name,
                device_type_slug=spine_devtype_slug,
                role_id=spine_role.id,
                site_id=site.id,
            )
            print(f"Spine: {new_spine.name}")
            spines.append(new_spine)
        except Exception as exc:
            print(f"ERROR creating spine '{name}': {exc}")
            sys.exit(1)

    # 8) One leaf + one access switch per building
    leaves = []
    access_switches = []

    def get_or_create_location(site_obj, location_name: str):
        # Locations are site-scoped; reuse an existing one when present.
        existing_loc = nb.nb.dcim.locations.get(site_id=site_obj.id, name=location_name)
        if existing_loc:
            print(f"Location '{existing_loc.name}' already exists; reusing.")
            return existing_loc
        try:
            loc = nb.nb.dcim.locations.create(
                name=location_name,
                slug=location_name.lower(),
                site=site_obj.id,
            )
            print(f"Created Location '{loc.name}'")
            return loc
        except Exception as loc_exc:
            print(f"ERROR creating location '{location_name}': {loc_exc}")
            sys.exit(1)

    for b_num in range(1, num_buildings + 1):
        building_code = f"{site_code}{b_num}"
        location = get_or_create_location(site, building_code)

        leaf_name = f"{site_code.lower()}{str(b_num).zfill(2)}_lf1_00"
        try:
            leaf_dev = nb.create_device(
                name=leaf_name,
                device_type_slug=leaf_devtype_slug,
                role_id=leaf_role.id,
                site_id=site.id,
                location_id=location.id,
            )
            print(f"Leaf: {leaf_dev.name}")
        except Exception as exc:
            print(f"ERROR creating leaf '{leaf_name}': {exc}")
            sys.exit(1)
        leaves.append(leaf_dev)

        sw_name = f"{site_code.lower()}{str(b_num).zfill(2)}_sw1_00"
        try:
            acc_dev = nb.create_device(
                name=sw_name,
                device_type_slug=access_devtype_slug,
                role_id=access_role.id,
                site_id=site.id,
                location_id=location.id,
            )
            print(f"Access Switch: {acc_dev.name}")
        except Exception as exc:
            print(f"ERROR creating access switch '{sw_name}': {exc}")
            sys.exit(1)
        access_switches.append(acc_dev)

    # 9) Cabling: each leaf uplinks to both spines, downlinks to its access switch
    def create_leaf_spine_cables(leaf_dev, spine_dev, leaf_if_name, spine_if_name):
        leaf_if = nb.get_or_create_interface(leaf_dev.id, leaf_if_name)
        spine_if = nb.get_or_create_interface(spine_dev.id, spine_if_name)
        nb.create_cable_if_not_exists(leaf_if, spine_if)

    for i, leaf_dev in enumerate(leaves, start=1):
        # Leaf <-> Spine1 on Ethernet1, Leaf <-> Spine2 on Ethernet2
        create_leaf_spine_cables(leaf_dev, spines[0], "Ethernet1", f"Ethernet{i}")
        create_leaf_spine_cables(leaf_dev, spines[1], "Ethernet2", f"Ethernet{i}")

        # Leaf Ethernet3 <-> Access Ethernet1
        leaf_eth3 = nb.get_or_create_interface(leaf_dev.id, "Ethernet3")
        acc_dev = access_switches[i - 1]
        acc_if = nb.get_or_create_interface(acc_dev.id, "Ethernet1")
        nb.create_cable_if_not_exists(leaf_eth3, acc_if)

    # 10) Underlay IPAM: fetch the parent prefix used for /31 point-to-points
    underlay_role = nb.nb.ipam.roles.get(slug="underlaycontainer")
    if not underlay_role:
        print("ERROR: No IPAM role 'underlaycontainer' found.")
        sys.exit(1)

    underlay_list = list(nb.nb.ipam.prefixes.filter(role_id=underlay_role.id, scope_id=site.id))
    if not underlay_list:
        print("ERROR: No underlay prefix found for this site.")
        sys.exit(1)
    parent_prefix = underlay_list[0]
    print(f"Using parent prefix '{parent_prefix.prefix}' for /31 allocations.")

    # 10b) Assign ASNs via the 'ASN' custom field (spines from 65001, leaves from 65101)
    next_spine_asn = 65001
    next_leaf_asn = 65101

    for spine_dev in spines:
        # Re-fetch to mutate a fresh record.
        dev_obj = nb.nb.dcim.devices.get(spine_dev.id)
        if not dev_obj:
            print(f"ERROR: Could not re-fetch spine '{spine_dev.name}'")
            sys.exit(1)
        if "ASN" not in dev_obj.custom_fields:
            print(f"[WARNING] Spine '{dev_obj.name}' has no custom field 'ASN'.")
        else:
            dev_obj.custom_fields["ASN"] = next_spine_asn
            try:
                dev_obj.save()
                print(f"Assigned ASN={next_spine_asn} to spine '{dev_obj.name}'.")
            except Exception as exc:
                print(f"ERROR saving 'ASN' on {dev_obj.name}: {exc}")
            # NOTE(review): counter advances only when the field exists — confirm
            # this matches the intended numbering when a device lacks the field.
            next_spine_asn += 1

    for leaf_dev in leaves:
        dev_obj = nb.nb.dcim.devices.get(leaf_dev.id)
        if not dev_obj:
            print(f"ERROR: Could not re-fetch leaf '{leaf_dev.name}'")
            sys.exit(1)
        if "ASN" not in dev_obj.custom_fields:
            print(f"[WARNING] Leaf '{dev_obj.name}' has no custom field 'ASN'.")
        else:
            dev_obj.custom_fields["ASN"] = next_leaf_asn
            try:
                dev_obj.save()
                print(f"Assigned ASN={next_leaf_asn} to leaf '{dev_obj.name}'.")
            except Exception as exc:
                print(f"ERROR saving 'ASN' on {dev_obj.name}: {exc}")
            next_leaf_asn += 1

    # 10c) Allocate a /31 per Spine<->Leaf link and assign both ends
    for i, leaf_dev in enumerate(leaves, start=1):
        # Leaf.Ethernet1 <-> Spine1.Ethernet{i}
        leaf_eth1 = nb.nb.dcim.interfaces.get(device_id=leaf_dev.id, name="Ethernet1")
        sp1_if = nb.nb.dcim.interfaces.get(device_id=spines[0].id, name=f"Ethernet{i}")

        child_31 = nb.allocate_prefix(parent_prefix, 31, site.id, underlay_role.id)
        if not child_31:
            print("ERROR: Could not allocate /31 for Spine1<->Leaf.")
            sys.exit(1)
        ip_list = nb.get_available_ips_in_prefix(child_31)
        if len(ip_list) < 2:
            print("ERROR: Not enough IP addresses in newly allocated /31.")
            sys.exit(1)
        nb.assign_ip_to_interface(sp1_if, ip_list[0].address)
        nb.assign_ip_to_interface(leaf_eth1, ip_list[1].address)

        # Leaf.Ethernet2 <-> Spine2.Ethernet{i}
        leaf_eth2 = nb.nb.dcim.interfaces.get(device_id=leaf_dev.id, name="Ethernet2")
        sp2_if = nb.nb.dcim.interfaces.get(device_id=spines[1].id, name=f"Ethernet{i}")

        child_31b = nb.allocate_prefix(parent_prefix, 31, site.id, underlay_role.id)
        if not child_31b:
            print("ERROR: No /31 returned for Spine2<->Leaf.")
            sys.exit(1)
        ip_list_b = nb.get_available_ips_in_prefix(child_31b)
        if len(ip_list_b) < 2:
            print("ERROR: Not enough IP addresses in newly allocated /31.")
            sys.exit(1)
        nb.assign_ip_to_interface(sp2_if, ip_list_b[0].address)
        nb.assign_ip_to_interface(leaf_eth2, ip_list_b[1].address)

    # 11) Loopback0 /32 per spine and leaf
    loopback_role = nb.nb.ipam.roles.get(slug="loopbackcontainer")
    if not loopback_role:
        print("ERROR: No IPAM role 'loopbackcontainer' found.")
        sys.exit(1)

    loopback_list = list(nb.nb.ipam.prefixes.filter(role_id=loopback_role.id, scope_id=site.id))
    if not loopback_list:
        print("ERROR: No loopback prefix found for this site.")
        sys.exit(1)
    loopback_parent = loopback_list[0]
    print(f"Using parent prefix '{loopback_parent.prefix}' for /32 loopback allocations.")

    for dev in spines + leaves:
        loop0_if = nb.get_or_create_interface(dev.id, "Loopback0", "virtual")
        if not loop0_if:
            print(f"ERROR: Could not create/retrieve Loopback0 for {dev.name}")
            continue

        child_32 = nb.allocate_prefix(loopback_parent, 32, site.id, loopback_role.id)
        if not child_32:
            print(f"ERROR: Could not allocate /32 for {dev.name}.")
            continue

        ip_list_c = nb.get_available_ips_in_prefix(child_32)
        if not ip_list_c:
            print(f"ERROR: Not enough IP addresses in newly allocated /32 for {dev.name}.")
            continue

        new_lo_ip = nb.assign_ip_to_interface(loop0_if, ip_list_c[0].address)
        if new_lo_ip:
            print(f"Assigned {new_lo_ip.address} to {dev.name} Loopback0.")

    # Summary
    print("\n=== Fabric Creation Completed ===")
    print(f"Site: {site.name} (slug={site.slug})")
    print("Spines:", [dev.name for dev in spines])
    print("Leaves:", [dev.name for dev in leaves])
    print("Access Switches:", [dev.name for dev in access_switches])
    print("Each leaf/spine link got a new /31, Loopback0 got a new /32, and ASNs were assigned.")
def create_site(self, name: str, slug: str):
    """Create a site in NetBox.

    Returns the created site record, or None when the API call fails
    (the error is reported on stdout).
    """
    payload = {"name": name, "slug": slug}
    try:
        new_site = self.nb.dcim.sites.create(payload)
    except Exception as e:
        print(f"[ERROR] Failed to create site '{name}': {e}")
        return None
    return new_site
def get_or_create_interface(self, device_id: int, if_name: str, if_type: str = "40gbase-x-qsfpp"):
    """Return the named interface on a device, creating it when missing.

    Returns the interface record, or None when both lookup and creation fail.
    """
    try:
        existing = self.nb.dcim.interfaces.get(device_id=device_id, name=if_name)
        if existing:
            return existing
        payload = {
            "device": device_id,
            "name": if_name,
            "type": if_type,
        }
        return self.nb.dcim.interfaces.create(payload)
    except Exception as e:
        print(f"[ERROR] Failed to create/get interface '{if_name}': {e}")
        return None
def allocate_prefix(self, parent_prefix, prefix_length: int, site_id: int, role_id: int):
    """Carve a child prefix (e.g. /31 or /32) out of *parent_prefix*.

    Uses NetBox's available-prefixes endpoint on the parent record.
    Returns the newly created prefix record, or None on failure.
    """
    request_body = {
        "prefix_length": prefix_length,
        "site": site_id,
        "role": role_id,
    }
    try:
        return parent_prefix.available_prefixes.create(request_body)
    except Exception as exc:
        print(f"[ERROR] Echec de l'allocation d'un /{prefix_length} pour {parent_prefix.prefix}: {exc}")
        return None
def save_custom_fields(self, device, fields: Dict[str, object]):
    """Set the given custom-field values on *device* and persist them.

    BUG FIX: the annotation was ``Dict[str, any]`` — ``any`` is the builtin
    function, not a type; ``object`` (accept anything) is used instead so no
    new ``typing`` import is needed.

    Args:
        device: a NetBox device record exposing ``custom_fields`` and ``save()``.
        fields: mapping of custom-field name to value.

    Returns:
        True when every field was set and the record saved, False on any error
        (the error is reported on stdout).
    """
    try:
        for key, value in fields.items():
            device.custom_fields[key] = value
        device.save()
        return True
    except Exception as e:
        print(f"[ERROR] Failed to save custom fields: {e}")
        return False
b/utilities/IPAM/subnets.yml new file mode 100644 index 0000000..21d4e91 --- /dev/null +++ b/utilities/IPAM/subnets.yml @@ -0,0 +1,14 @@ +Location: + Region: Europe + City: Paris + +Containers: + UnderlayContainer: + cidr: 172.16.0.0/16 + description: "Underlay container prefix" + LoopbackContainer: + cidr: 192.168.100.0/24 + description: "Loopback container prefix" + CustomersContainer: + cidr: 10.0.0.0/8 + description: "Customer container prefix" \ No newline at end of file diff --git a/utilities/VPN/Customers.yml b/utilities/VPN/Customers.yml new file mode 100644 index 0000000..958f552 --- /dev/null +++ b/utilities/VPN/Customers.yml @@ -0,0 +1,9 @@ +Tenant: + - Name: Orange + Subnets: 10.100.0.0/24 + VLAN: 100 + VNI: 1100 + - Name: Purpel + Subnets: 10.50.0.0/24 + VLAN: 50 + VNI: 1050 \ No newline at end of file diff --git a/utilities/import.py b/utilities/import.py new file mode 100644 index 0000000..2b1308d --- /dev/null +++ b/utilities/import.py @@ -0,0 +1,340 @@ +#!/usr/bin/env python3 +import sys + +import requests +import yaml + +######################################## +# Device model import +######################################## + + +def get_or_create_manufacturer(netbox_url, headers, manufacturer_name, slug): + url = f"{netbox_url}/api/dcim/manufacturers/?slug={slug}" + resp = requests.get(url, headers=headers) + resp.raise_for_status() + results = resp.json()["results"] + if results: + print( + f"[INFO] Manufacturer '{manufacturer_name}' (slug={slug}) already exists." 
def get_or_create_device_type(netbox_url, headers, device_type, manufacturers_cache):
    """Fetch a device type by slug+manufacturer, creating it if absent.

    Args:
        netbox_url: base NetBox URL (no trailing slash).
        headers: HTTP headers carrying the API token.
        device_type: dict from device_model.yml ('manufacturer', 'model', 'slug', ...).
        manufacturers_cache: slug -> manufacturer object, built earlier in main().

    Returns:
        The existing or newly created device-type object (dict), or None when
        the manufacturer slug is not in the cache.
    """
    manufacturer_slug = device_type["manufacturer"]
    if manufacturer_slug not in manufacturers_cache:
        print(
            f"[WARN] Manufacturer slug '{manufacturer_slug}' not found in cache. Skipping device type."
        )
        return None

    manufacturer_obj = manufacturers_cache[manufacturer_slug]
    slug = device_type["slug"]

    url = f"{netbox_url}/api/dcim/device-types/?slug={slug}&manufacturer_id={manufacturer_obj['id']}"
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    results = resp.json()["results"]
    if results:
        print(
            f"[INFO] Device Type '{device_type['model']}' (slug={slug}) already exists."
        )
        return results[0]

    create_url = f"{netbox_url}/api/dcim/device-types/"
    payload = {
        "manufacturer": manufacturer_obj["id"],
        "model": device_type["model"],
        "slug": slug,
        "part_number": device_type.get("part_number", ""),
        "u_height": device_type.get("u_height", 1),
        "is_full_depth": device_type.get("is_full_depth", False),
        "comments": device_type.get("comments", ""),
    }
    resp = requests.post(create_url, headers=headers, json=payload)
    resp.raise_for_status()
    created_dt = resp.json()
    print(
        f"[INFO] Device Type '{created_dt['model']}' created (ID={created_dt['id']})."
    )
    # BUG FIX: the creation path previously fell through and returned None,
    # unlike every sibling get_or_create_* helper; return the created object.
    return created_dt
def get_or_create_prefix_role(netbox_url, headers, role_name):
    """Fetch a prefix (IPAM) role by name, creating it when absent.

    The slug is derived from *role_name* (lowercased, spaces to dashes).
    Returns the existing or newly created role object (dict).
    """
    slug = role_name.lower().replace(" ", "-")

    lookup_url = f"{netbox_url}/api/ipam/roles/?slug={slug}"
    lookup = requests.get(lookup_url, headers=headers)
    lookup.raise_for_status()
    matches = lookup.json()["results"]
    if matches:
        print(f"[INFO] Prefix Role '{role_name}' (slug={slug}) already exists.")
        return matches[0]

    creation = requests.post(
        f"{netbox_url}/api/ipam/roles/",
        headers=headers,
        json={"name": role_name, "slug": slug},
    )
    creation.raise_for_status()
    role_obj = creation.json()
    print(f"[INFO] Prefix Role '{role_name}' created (ID={role_obj['id']}).")
    return role_obj
def main():
    """Entry point: load the YAML models and push them into NetBox.

    Expects four CLI arguments: NetBox URL, API token, path to the device
    model YAML, and path to the subnets YAML.
    """
    if len(sys.argv) != 5:
        # NOTE(review): the original usage string appears truncated (the
        # angle-bracket placeholders were likely lost); restored explicitly.
        print(
            "Usage: python import.py <netbox_url> <netbox_token> "
            "<device_model_file> <subnets_file>"
        )
        sys.exit(1)

    netbox_url = sys.argv[1].rstrip("/")
    netbox_token = sys.argv[2]
    device_model_file = sys.argv[3]
    subnets_file = sys.argv[4]

    headers = {
        "Authorization": f"Token {netbox_token}",
        "Content-Type": "application/json",
        "Accept": "application/json",
    }

    # 1) Load device_model.yml
    with open(device_model_file, "r") as f:
        device_model_data = yaml.safe_load(f)

    # 2) Load subnets.yml
    with open(subnets_file, "r") as f:
        subnets_data = yaml.safe_load(f)

    # Ensure the 'ASN' custom field exists before anything references it.
    get_or_create_custom_field(netbox_url, headers)

    ######################################################
    # device_model.yml : manufacturers, roles, types
    ######################################################

    manufacturers_cache = {}
    if "manufacturers" in device_model_data:
        for mf in device_model_data["manufacturers"]:
            mf_obj = get_or_create_manufacturer(netbox_url, headers, mf["name"], mf["slug"])
            manufacturers_cache[mf["slug"]] = mf_obj

    if "device_roles" in device_model_data:
        for role in device_model_data["device_roles"]:
            get_or_create_device_role(netbox_url, headers, role)

    if "device_types" in device_model_data:
        for dt in device_model_data["device_types"]:
            get_or_create_device_type(netbox_url, headers, dt, manufacturers_cache)

    ######################################################
    # subnets.yml : Region, Site, Containers, etc.
    ######################################################

    region_name = subnets_data.get("Location", {}).get("Region", "Europe")
    region_obj = get_or_create_region(netbox_url, headers, region_name)
    region_id = region_obj["id"]

    city_name = subnets_data.get("Location", {}).get("City", "Paris")
    site_obj = get_or_create_site(netbox_url, headers, city_name, region_id=region_id)
    site_id = site_obj["id"]

    # One prefix role per container key (e.g. 'UnderlayContainer'), then the
    # container prefix itself attached to the site.
    containers = subnets_data.get("Containers", {})
    for container_name, c_data in containers.items():
        cidr = c_data.get("cidr")
        description = c_data.get("description", f"{container_name} prefix")

        role_obj = get_or_create_prefix_role(netbox_url, headers, container_name)
        role_id = role_obj["id"]

        create_container_prefix(
            netbox_url, headers, cidr, description, role_id, site_id
        )

    # Optionally handle buildings as locations
    buildings = subnets_data.get("Buildings", {})
    for building_name in buildings.keys():
        get_or_create_location(netbox_url, headers, building_name, site_id)

    print("[INFO] Script completed successfully!")
"https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918 }, +] + +[[package]] +name = "certifi" +version = "2024.7.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c2/02/a95f2b11e207f68bc64d7aae9666fed2e2b3f307748d5123dffb72a1bbea/certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b", size = 164065 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1c/d5/c84e1a17bf61d4df64ca866a1c9a913874b4e9bdc131ec689a0ad013fb36/certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90", size = 162960 }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/16/b0/572805e227f01586461c80e0fd25d65a2115599cc9dad142fee4b747c357/charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3", size = 123188 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/94/ce8e6f63d18049672c76d07d119304e1e2d7c6098f0841b51c666e9f44a0/charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda", size = 195698 }, + { url = "https://files.pythonhosted.org/packages/24/2e/dfdd9770664aae179a96561cc6952ff08f9a8cd09a908f259a9dfa063568/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313", size = 140162 }, + { url = 
"https://files.pythonhosted.org/packages/24/4e/f646b9093cff8fc86f2d60af2de4dc17c759de9d554f130b140ea4738ca6/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9", size = 150263 }, + { url = "https://files.pythonhosted.org/packages/5e/67/2937f8d548c3ef6e2f9aab0f6e21001056f692d43282b165e7c56023e6dd/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b", size = 142966 }, + { url = "https://files.pythonhosted.org/packages/52/ed/b7f4f07de100bdb95c1756d3a4d17b90c1a3c53715c1a476f8738058e0fa/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11", size = 144992 }, + { url = "https://files.pythonhosted.org/packages/96/2c/d49710a6dbcd3776265f4c923bb73ebe83933dfbaa841c5da850fe0fd20b/charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f", size = 147162 }, + { url = "https://files.pythonhosted.org/packages/b4/41/35ff1f9a6bd380303dea55e44c4933b4cc3c4850988927d4082ada230273/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd", size = 140972 }, + { url = "https://files.pythonhosted.org/packages/fb/43/c6a0b685fe6910d08ba971f62cd9c3e862a85770395ba5d9cad4fede33ab/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2", size = 149095 }, + { url = "https://files.pythonhosted.org/packages/4c/ff/a9a504662452e2d2878512115638966e75633519ec11f25fca3d2049a94a/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = 
"sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886", size = 152668 }, + { url = "https://files.pythonhosted.org/packages/6c/71/189996b6d9a4b932564701628af5cee6716733e9165af1d5e1b285c530ed/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601", size = 150073 }, + { url = "https://files.pythonhosted.org/packages/e4/93/946a86ce20790e11312c87c75ba68d5f6ad2208cfb52b2d6a2c32840d922/charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd", size = 145732 }, + { url = "https://files.pythonhosted.org/packages/cd/e5/131d2fb1b0dddafc37be4f3a2fa79aa4c037368be9423061dccadfd90091/charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407", size = 95391 }, + { url = "https://files.pythonhosted.org/packages/27/f2/4f9a69cc7712b9b5ad8fdb87039fd89abba997ad5cbe690d1835d40405b0/charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971", size = 102702 }, + { url = "https://files.pythonhosted.org/packages/0e/f6/65ecc6878a89bb1c23a086ea335ad4bf21a588990c3f535a227b9eea9108/charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85", size = 49767 }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = 
"sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335 }, +] + +[[package]] +name = "decorator" +version = "5.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/0c/8d907af351aa16b42caae42f9d6aa37b900c67308052d10fdce809f8d952/decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330", size = 35016 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d5/50/83c593b07763e1161326b3b8c6686f0f4b0f24d5526546bee538c89837d6/decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186", size = 9073 }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702 }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 }, +] + +[[package]] +name = "ipython" +version = "8.32.0" +source = { 
registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "decorator" }, + { name = "jedi" }, + { name = "matplotlib-inline" }, + { name = "pexpect", marker = "sys_platform != 'emscripten' and sys_platform != 'win32'" }, + { name = "prompt-toolkit" }, + { name = "pygments" }, + { name = "stack-data" }, + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/36/80/4d2a072e0db7d250f134bc11676517299264ebe16d62a8619d49a78ced73/ipython-8.32.0.tar.gz", hash = "sha256:be2c91895b0b9ea7ba49d33b23e2040c352b33eb6a519cca7ce6e0c743444251", size = 5507441 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/e1/f4474a7ecdb7745a820f6f6039dc43c66add40f1bcc66485607d93571af6/ipython-8.32.0-py3-none-any.whl", hash = "sha256:cae85b0c61eff1fc48b0a8002de5958b6528fa9c8defb1894da63f42613708aa", size = 825524 }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278 }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159 } +wheels = 
[ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899 }, +] + +[[package]] +name = "packaging" +version = "24.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d0/63/68dbb6eb2de9cb10ee4c9c14a0148804425e13c4fb20d61cce69f53106da/packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f", size = 163950 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/88/ef/eb23f262cca3c0c4eb7ab1933c3b1f03d021f2c48f54763065b6f0e321be/packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759", size = 65451 }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650 }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772 }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.50" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/e1/bd15cb8ffdcfeeb2bdc215de3c3cffca11408d829e4b8416dcfe71ba8854/prompt_toolkit-3.0.50.tar.gz", hash = "sha256:544748f3860a2623ca5cd6d2795e7a14f3d0e1c3c9728359013f79877fc89bab", size = 429087 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e4/ea/d836f008d33151c7a1f62caf3d8dd782e4d15f6a43897f64480c2b8de2ad/prompt_toolkit-3.0.50-py3-none-any.whl", hash = "sha256:9b6427eb19e479d98acff65196a307c555eb567989e6d88ebbb1b509d9779198", size = 387816 }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993 }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842 }, +] + +[[package]] +name = "pygments" +version = "2.19.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7c/2d/c3338d48ea6cc0feb8446d8e6937e1408088a72a39937982cc6111d17f84/pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f", size = 4968581 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8a/0b/9fcc47d19c48b59121088dd6da2488a49d5f72dacf8262e2790a1d2c7d15/pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c", size = 1225293 }, +] + +[[package]] +name = "pynetbox" +version = "7.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/1a/f9/e6c524e5ddc4c2788ab2f5ee1ab2d9afad49cad9c7cd3a372cf5b8433ed3/pynetbox-7.4.1.tar.gz", hash = "sha256:3f82b5964ca77a608aef6cc2fc48a3961f7667fbbdbb60646655373e3dae00c3", size = 68223 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f0/4f/fab3934a0dae677e4c23381749ad379c716c6f7fbae5711ebf8fd0cf1bdc/pynetbox-7.4.1-py3-none-any.whl", hash = "sha256:f42ce4df6ce97765df91bb4cc0c0e315683d15135265270d78f595114dd20e2b", size = 35075 }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631 } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/ef/e3/3af305b830494fa85d95f6d95ef7fa73f2ee1cc8ef5b495c7c3269fb835f/PyYAML-6.0.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:efdca5630322a10774e8e98e1af481aad470dd62c3170801852d752aa7a783ba", size = 181309 }, + { url = "https://files.pythonhosted.org/packages/45/9f/3b1c20a0b7a3200524eb0076cc027a970d320bd3a6592873c85c92a08731/PyYAML-6.0.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:50187695423ffe49e2deacb8cd10510bc361faac997de9efef88badc3bb9e2d1", size = 171679 }, + { url = "https://files.pythonhosted.org/packages/7c/9a/337322f27005c33bcb656c655fa78325b730324c78620e8328ae28b64d0c/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ffe8360bab4910ef1b9e87fb812d8bc0a308b0d0eef8c8f44e0254ab3b07133", size = 733428 }, + { url = "https://files.pythonhosted.org/packages/a3/69/864fbe19e6c18ea3cc196cbe5d392175b4cf3d5d0ac1403ec3f2d237ebb5/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:17e311b6c678207928d649faa7cb0d7b4c26a0ba73d41e99c4fff6b6c3276484", size = 763361 }, + { url = "https://files.pythonhosted.org/packages/04/24/b7721e4845c2f162d26f50521b825fb061bc0a5afcf9a386840f23ea19fa/PyYAML-6.0.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70b189594dbe54f75ab3a1acec5f1e3faa7e8cf2f1e08d9b561cb41b845f69d5", size = 759523 }, + { url = "https://files.pythonhosted.org/packages/2b/b2/e3234f59ba06559c6ff63c4e10baea10e5e7df868092bf9ab40e5b9c56b6/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:41e4e3953a79407c794916fa277a82531dd93aad34e29c2a514c2c0c5fe971cc", size = 726660 }, + { url = "https://files.pythonhosted.org/packages/fe/0f/25911a9f080464c59fab9027482f822b86bf0608957a5fcc6eaac85aa515/PyYAML-6.0.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:68ccc6023a3400877818152ad9a1033e3db8625d899c72eacb5a668902e4d652", size = 751597 }, + { url = 
"https://files.pythonhosted.org/packages/14/0d/e2c3b43bbce3cf6bd97c840b46088a3031085179e596d4929729d8d68270/PyYAML-6.0.2-cp313-cp313-win32.whl", hash = "sha256:bc2fa7c6b47d6bc618dd7fb02ef6fdedb1090ec036abab80d4681424b84c1183", size = 140527 }, + { url = "https://files.pythonhosted.org/packages/fa/de/02b54f42487e3d3c6efb3f89428677074ca7bf43aae402517bc7cca949f3/PyYAML-6.0.2-cp313-cp313-win_amd64.whl", hash = "sha256:8388ee1976c416731879ac16da0aff3f63b286ffdd57cdeb95f3f2e085687563", size = 156446 }, +] + +[[package]] +name = "requests" +version = "2.32.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/70/2bf7780ad2d390a8d301ad0b550f1581eadbd9a20f896afe06353c2a2913/requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760", size = 131218 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6", size = 64928 }, +] + +[[package]] +name = "ruff" +version = "0.9.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c0/17/529e78f49fc6f8076f50d985edd9a2cf011d1dbadb1cdeacc1d12afc1d26/ruff-0.9.4.tar.gz", hash = "sha256:6907ee3529244bb0ed066683e075f09285b38dd5b4039370df6ff06041ca19e7", size = 3599458 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/f8/3fafb7804d82e0699a122101b5bee5f0d6e17c3a806dcbc527bb7d3f5b7a/ruff-0.9.4-py3-none-linux_armv6l.whl", hash = "sha256:64e73d25b954f71ff100bb70f39f1ee09e880728efb4250c632ceed4e4cdf706", size = 11668400 }, + { url = 
"https://files.pythonhosted.org/packages/2e/a6/2efa772d335da48a70ab2c6bb41a096c8517ca43c086ea672d51079e3d1f/ruff-0.9.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6ce6743ed64d9afab4fafeaea70d3631b4d4b28b592db21a5c2d1f0ef52934bf", size = 11628395 }, + { url = "https://files.pythonhosted.org/packages/dc/d7/cd822437561082f1c9d7225cc0d0fbb4bad117ad7ac3c41cd5d7f0fa948c/ruff-0.9.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:54499fb08408e32b57360f6f9de7157a5fec24ad79cb3f42ef2c3f3f728dfe2b", size = 11090052 }, + { url = "https://files.pythonhosted.org/packages/9e/67/3660d58e893d470abb9a13f679223368ff1684a4ef40f254a0157f51b448/ruff-0.9.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37c892540108314a6f01f105040b5106aeb829fa5fb0561d2dcaf71485021137", size = 11882221 }, + { url = "https://files.pythonhosted.org/packages/79/d1/757559995c8ba5f14dfec4459ef2dd3fcea82ac43bc4e7c7bf47484180c0/ruff-0.9.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:de9edf2ce4b9ddf43fd93e20ef635a900e25f622f87ed6e3047a664d0e8f810e", size = 11424862 }, + { url = "https://files.pythonhosted.org/packages/c0/96/7915a7c6877bb734caa6a2af424045baf6419f685632469643dbd8eb2958/ruff-0.9.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:87c90c32357c74f11deb7fbb065126d91771b207bf9bfaaee01277ca59b574ec", size = 12626735 }, + { url = "https://files.pythonhosted.org/packages/0e/cc/dadb9b35473d7cb17c7ffe4737b4377aeec519a446ee8514123ff4a26091/ruff-0.9.4-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:56acd6c694da3695a7461cc55775f3a409c3815ac467279dfa126061d84b314b", size = 13255976 }, + { url = "https://files.pythonhosted.org/packages/5f/c3/ad2dd59d3cabbc12df308cced780f9c14367f0321e7800ca0fe52849da4c/ruff-0.9.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e0c93e7d47ed951b9394cf352d6695b31498e68fd5782d6cbc282425655f687a", size = 12752262 }, + { url = 
"https://files.pythonhosted.org/packages/c7/17/5f1971e54bd71604da6788efd84d66d789362b1105e17e5ccc53bba0289b/ruff-0.9.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1d4c8772670aecf037d1bf7a07c39106574d143b26cfe5ed1787d2f31e800214", size = 14401648 }, + { url = "https://files.pythonhosted.org/packages/30/24/6200b13ea611b83260501b6955b764bb320e23b2b75884c60ee7d3f0b68e/ruff-0.9.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfc5f1d7afeda8d5d37660eeca6d389b142d7f2b5a1ab659d9214ebd0e025231", size = 12414702 }, + { url = "https://files.pythonhosted.org/packages/34/cb/f5d50d0c4ecdcc7670e348bd0b11878154bc4617f3fdd1e8ad5297c0d0ba/ruff-0.9.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:faa935fc00ae854d8b638c16a5f1ce881bc3f67446957dd6f2af440a5fc8526b", size = 11859608 }, + { url = "https://files.pythonhosted.org/packages/d6/f4/9c8499ae8426da48363bbb78d081b817b0f64a9305f9b7f87eab2a8fb2c1/ruff-0.9.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:a6c634fc6f5a0ceae1ab3e13c58183978185d131a29c425e4eaa9f40afe1e6d6", size = 11485702 }, + { url = "https://files.pythonhosted.org/packages/18/59/30490e483e804ccaa8147dd78c52e44ff96e1c30b5a95d69a63163cdb15b/ruff-0.9.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:433dedf6ddfdec7f1ac7575ec1eb9844fa60c4c8c2f8887a070672b8d353d34c", size = 12067782 }, + { url = "https://files.pythonhosted.org/packages/3d/8c/893fa9551760b2f8eb2a351b603e96f15af167ceaf27e27ad873570bc04c/ruff-0.9.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:d612dbd0f3a919a8cc1d12037168bfa536862066808960e0cc901404b77968f0", size = 12483087 }, + { url = "https://files.pythonhosted.org/packages/23/15/f6751c07c21ca10e3f4a51ea495ca975ad936d780c347d9808bcedbd7182/ruff-0.9.4-py3-none-win32.whl", hash = "sha256:db1192ddda2200671f9ef61d9597fcef89d934f5d1705e571a93a67fb13a4402", size = 9852302 }, + { url = 
"https://files.pythonhosted.org/packages/12/41/2d2d2c6a72e62566f730e49254f602dfed23019c33b5b21ea8f8917315a1/ruff-0.9.4-py3-none-win_amd64.whl", hash = "sha256:05bebf4cdbe3ef75430d26c375773978950bbf4ee3c95ccb5448940dc092408e", size = 10850051 }, + { url = "https://files.pythonhosted.org/packages/c6/e6/3d6ec3bc3d254e7f005c543a661a41c3e788976d0e52a1ada195bd664344/ruff-0.9.4-py3-none-win_arm64.whl", hash = "sha256:585792f1e81509e38ac5123492f8875fbc36f3ede8185af0a26df348e5154f41", size = 10078251 }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens" }, + { name = "executing" }, + { name = "pure-eval" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521 }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359 }, +] + +[[package]] +name = "urllib3" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369 }, +] + +[[package]] +name = "vxlan-automation" +source = { virtual = "." } +dependencies = [ + { name = "pynetbox" }, + { name = "pyyaml" }, + { name = "requests" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipython" }, +] +lint = [ + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "pynetbox", specifier = ">=7.4.1" }, + { name = "pyyaml", specifier = ">=6.0.2" }, + { name = "requests", specifier = ">=2.32.3" }, +] + +[package.metadata.requires-dev] +dev = [{ name = "ipython", specifier = ">=8.32.0" }] +lint = [{ name = "ruff", specifier = ">=0.9.4" }] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301 } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 }, +]