Compare commits: 80adb79196 ... main (24 commits)

Commits (SHA1):
8b35d3068b
2b18644024
fbd87e01c5
a67a99f849
6da53a8907
7b4b922923
87e1d24a86
83480ed3a8
5ec57150b1
bd80c1c6cc
9e125bf9f7
a0cf93a6be
5c92020169
84479c786a
5636d92474
0518d1ba48
f579000a96
752cea15d4
5be06ec11f
58369fccaf
4c46af63e3
755128a5c7
76edbf0624
280a013c48
@@ -1,8 +1,13 @@
# User defined hostname persisted across all sessions, used to keep track of the same user
# Set to $NODE_NAME to use the hostname of the node when running a cluster with Hetzner Cloud CLI
TELEGRAF_HOSTNAME=
# MongoDB connection string
TELEGRAF_MONGODB_DSN=mongodb://stats_user:%40z%5EVFhN7q%25vzit@tube.kobim.cloud:27107/?authSource=statistics
# MongoDB database name to store the data
TELEGRAF_MONGODB_DATABASE=statistics
# URL of the video to be analyzed
VIDEO_URL=https://tube.kobim.cloud/w/iN2T8PmbSb4HJTDA2rV3sg
VIDEO_URL=https://tube.kobim.cloud/w/eT1NZibmwMy6bx6N2YGLwr
# Selenium Grid Hub URL
#HUB_URL=http://localhost:4444
# Socket port to send and listen for incoming data
#SOCKET_PORT=8094
2  .gitattributes  vendored  Normal file
@@ -0,0 +1,2 @@
server/peertube[[:space:]]data/statistics.peertube_hetzner_default_latency.json filter=lfs diff=lfs merge=lfs -text
server/peertube[[:space:]]data/statistics.peertube_hetzner_high_latency.json filter=lfs diff=lfs merge=lfs -text
14  .github/actions/setup-docker-environment/action.yml  vendored  Normal file
@@ -0,0 +1,14 @@
name: "Setup Docker Environment"
description: "Common steps for setting up Docker build environment (checkout, QEMU, and Buildx)"

runs:
  using: "composite"
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4.2.2

    - name: Set up QEMU
      uses: docker/setup-qemu-action@v3.4.0

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3.9.0
24  .github/workflows/feature-pr-build.yml  vendored
@@ -1,4 +1,4 @@
name: Build Docker Image for Feature PRs
name: Build Docker Images for Pull Request

on:
  pull_request:
@@ -10,20 +10,28 @@ jobs:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout repository
        uses: actions/checkout@v4.2.2
      - name: Checkout repository actions
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.4.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.9.0
      - name: Setup Docker Environment
        uses: ./.github/actions/setup-docker-environment

      - name: Build Docker image
        uses: docker/build-push-action@v6.13.0
        with:
          context: .
          push: false
          tags: ${{ github.repository_owner }}/${{ github.event.repository.name }}:${{ github.event.pull_request.number }}
          platforms: |
            linux/amd64
            linux/arm64

      - name: Build monolith Docker image
        uses: docker/build-push-action@v6.13.0
        with:
          context: .
          tags: ${{ github.repository_owner }}/${{ github.event.repository.name }}:${{ github.event.pull_request.number }}-monolith
          file: ./Monolith.dockerfile
          platforms: |
            linux/amd64
            linux/arm64
11  .github/workflows/main.yml  vendored
@@ -12,14 +12,11 @@ jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4.2.2
      - name: Checkout repository actions
        uses: actions/checkout@v2

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3.4.0

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3.9.0
      - name: Setup Docker Environment
        uses: ./.github/actions/setup-docker-environment

      - name: Log in to Docker registry
        uses: docker/login-action@v3.3.0
55  .github/workflows/monolith.yml  vendored  Normal file
@@ -0,0 +1,55 @@
name: Build and Push Docker Image

on:
  push:
    branches:
      - main

env:
  REGISTRY_URL: gitea.kobim.cloud
  DOCKERHUB_USERNAME: kobimex

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository actions
        uses: actions/checkout@v2

      - name: Setup Docker Environment
        uses: ./.github/actions/setup-docker-environment

      - name: Log in to custom Docker registry
        uses: docker/login-action@v3.3.0
        with:
          registry: ${{ env.REGISTRY_URL }}
          username: ${{ github.actor }}
          password: ${{ secrets.REGISTRY_TOKEN }}

      - name: Build and push Docker image to custom registry
        uses: docker/build-push-action@v6.13.0
        with:
          context: .
          push: true
          tags: ${{ env.REGISTRY_URL }}/${{ github.repository_owner }}/${{ github.event.repository.name }}-monolith:latest
          file: ./Monolith.dockerfile
          platforms: |
            linux/amd64
            linux/arm64

      - name: Log in to Docker Hub
        uses: docker/login-action@v3.3.0
        with:
          username: ${{ env.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build and push Docker image to Docker Hub
        uses: docker/build-push-action@v6.13.0
        with:
          context: .
          push: true
          tags: ${{ env.DOCKERHUB_USERNAME }}/${{ github.event.repository.name }}-monolith:latest
          file: ./Monolith.dockerfile
          platforms: |
            linux/amd64
            linux/arm64
9  .gitignore  vendored
@@ -262,6 +262,7 @@ TSWLatexianTemp*

# gummi
.*.swp
*.swp

# KBibTeX
*~[0-9]*
@@ -293,7 +294,15 @@ TSWLatexianTemp*
.ipynb_checkpoints/
env/
.env
.env.hetzner
__pycache__/
test/
venv/
.venv/

# Node.js
node_modules/
npm-debug.log
yarn-error.log
yarn-debug.log*
background.bundle.js
18  Dockerfile
@@ -1,8 +1,15 @@
FROM python:3.13.1-slim-bookworm
FROM debian:bookworm-slim

# Install dependencies
# Install Python and curl
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv curl

# Create and activate a virtual environment
RUN python3 -m venv /app/venv
ENV PATH="/app/venv/bin:$PATH"

# Install dependencies with venv
COPY requirements.txt /app/requirements.txt
RUN pip install --no-cache-dir -r /app/requirements.txt
RUN /app/venv/bin/pip install -r /app/requirements.txt

# Copy the application
COPY main.py /app
@@ -10,4 +17,7 @@ COPY utils/ /app/utils
WORKDIR /app

# Run the application
CMD ["python", "main.py"]
CMD ["/app/venv/bin/python", "main.py"]

# Healthcheck
HEALTHCHECK --interval=5s --timeout=10s --retries=5 --start-period=5s CMD curl -f http://localhost:9092/heartbeat || exit 1
21  LICENSE  Normal file
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2025 Mirko Milovanovic

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
50  Monolith.dockerfile  Normal file
@@ -0,0 +1,50 @@
FROM node:22.14.0-bookworm-slim AS build

# Copy the webrtc-internals-exporter files
COPY webrtc-internals-exporter /tmp/webrtc-internals-exporter

WORKDIR /tmp/webrtc-internals-exporter/webpack

# Install dependencies
RUN --mount=type=cache,target=/root/.npm \
    npm install

# Build the project
RUN npm run build

FROM selenium/standalone-chromium:129.0

# Install Python-virtualenv
RUN sudo apt-get update && sudo apt-get install -y python3-venv

WORKDIR /tmp

# Install Telegraf
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key && \
    echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && \
    cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null && \
    echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list && \
    sudo apt-get update && sudo apt-get install -y telegraf

# Create and activate a virtual environment
RUN python3 -m venv ./venv
ENV PATH="/tmp/venv/bin:$PATH"

# Install dependencies with venv
COPY requirements.txt ./requirements.txt
RUN ./venv/bin/pip install -r ./requirements.txt

# Copy files
COPY main.py .
COPY utils/ ./utils
COPY telegraf.conf ./telegraf.conf
COPY webrtc-internals-exporter /tmp/webrtc-internals-exporter
COPY --from=build /tmp/webrtc-internals-exporter/background.bundle.js /tmp/webrtc-internals-exporter/background.bundle.js
COPY --chown="${SEL_UID}:${SEL_GID}" monolith-entrypoint.sh /opt/bin/collector.sh

# Run the entrypoint
RUN chmod +x /opt/bin/collector.sh
ENTRYPOINT ["/opt/bin/collector.sh"]

# Healthcheck
HEALTHCHECK --interval=5s --timeout=10s --retries=5 --start-period=5s CMD curl -f http://localhost:9092/heartbeat || exit 1
91  README.md
@@ -10,19 +10,12 @@ peertube-collector is a project designed to collect and analyze WebRTC statistic
- Docker Engine Community version is required. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).

### Ports:
#### Localhost (REQUIRED):
- 4444 (Selenium)

Ports can be opened in the host machine's firewall with:
```sh
ufw allow from 172.30.0.0/16 to any port 4444
```

#### External (OPTIONAL):
These ports are actively used by the selenium and collector services. By default they should not be blocked by the firewall, but if they are, they can be opened in the host machine's firewall.
#### External (OPTIONAL PROBABLY NOT NEEDED!!!):
These ports are actively used by the selenium and collector services.

- 50000:60000/udp (WebRTC)
  - Needed for WebRTC NAT traversal, otherwise the browser will not connect to any peer.
  - WebRTC NAT traversal requires a range of ports to be open.
    The range needs to be fairly large since the port is chosen randomly by the STUN server.
- 27107/tcp (MongoDB)

@@ -31,36 +24,67 @@ Ports can be opened in the host machine's firewall with:
ufw allow 50000:60000/udp
ufw allow 27107/tcp
```

---

## Setup
## Setup with Docker Compose

1. Clone the repository:
   ```sh
   git clone <repository-url>
   cd peertube-collector
   ```

2. Create and configure the environment file based on the `.env.example` file:
   ```sh
   cp .env.example .env
   ```

3. Adjust the firewall settings to allow the necessary ports, if needed.

4. Start the Docker containers:
   ```sh
   docker compose up
   ```
   or in detached mode:
   ```sh
   docker compose up -d
   docker compose up --abort-on-container-failure
   ```

The collector will start gathering WebRTC stats from the Selenium container and sending them to the Telegraf service.

To stop the Docker containers run: `docker compose down -v`

The collector will start gathering WebRTC stats from the Selenium container and sending them to the Telegraf service.
### Setup with Monolithic image:

1. Clone the repository:
   ```sh
   git clone <repository-url>
   cd peertube-collector
   ```
2. Create and configure the environment file based on the `.env.example` file:
   ```sh
   cp .env.example .env
   ```
3. Adjust the firewall settings to allow the necessary ports, if needed.
4. Start the Docker container:
   ```sh
   docker run --rm -p 7900:7900 --env-file .env --name peertube-collector --pull always --shm-size="2g" gitea.kobim.cloud/kobim/peertube-collector-monolith:latest
   ```

   or

   ```sh
   docker run --rm -p 7900:7900 --env-file .env --name peertube-collector --pull always --shm-size="2g" kobimex/peertube-collector-monolith:latest
   ```

### Environment Variables

| Environment Variable | Service | Default Value | Description |
| --- | --- | --- | --- |
| `TELEGRAF_HOSTNAME` | telegraf | None, **must** be set | Hostname used to identify the host/user between sessions |
| `TELEGRAF_MONGODB_DSN` | telegraf | `mongodb://stats_user...` | DSN for the MongoDB service |
| `TELEGRAF_MONGODB_DATABASE` | telegraf | `statistics` | Database name for the MongoDB service |
| `VIDEO_URL` | collector | `https://tube.kobim.cloud/...` | URL for the video to be analyzed |
| `HUB_URL` | collector | None | URL for the Selenium Hub. If not set, the local Chrome driver will be used |
| `SOCKET_URL` | collector | `localhost` | Socket URL for the Telegraf service |
| `SOCKET_PORT` | collector & telegraf | `8094` | Socket port for the Telegraf service |
| `WEBRTC_INTERNALS_PATH` | collector | None | **Absolute** path for the WebRTC internals exporter extension. When **not** set, the extension path is constructed relative to the current main script location. |
| `WEBRTC_INTERNALS_EXPORTER_URL` | WebRTC extension | `http://localhost:9092` | Server URL for the WebRTC internals exporter extension |

Variables can be set in the `.env` file.
An example configuration is provided in the `.env.example` file.

### Monitoring
A noVNC server is available at [http://localhost:7900](http://localhost:7900/?autoconnect=1&resize=scale&password=secret) to monitor the Selenium container. The password is `secret`.
@@ -78,6 +102,10 @@ The `docker-compose.yml` file defines the following services:

The `Dockerfile` sets up the Python environment and installs the necessary dependencies to run the `main.py` script.

### Monolithic Dockerfile

`Monolith.dockerfile` is a single Dockerfile that combines the Selenium, Telegraf, and Collector services into a single container. This is useful for deployment in a single-container environment.

### Main Python Script

The `main.py` script sets up the Selenium WebDriver, collects WebRTC stats, and sends them to the Telegraf service.
@@ -85,17 +113,10 @@ The `main.py` script sets up the Selenium WebDriver, collects WebRTC stats, and
### WebRTC Internals Exporter

The `webrtc-internals-exporter` directory contains a Chromium extension that collects WebRTC stats from the browser.
It uses Webpack to replace the server collector endpoint with an environment variable.

## Working Project Structure
# Credits

```
peertube-collector/
├── requirements.txt
├── telegraf.conf
├── docker-compose.yml
├── Dockerfile
├── main.py
├── .env
└── utils/
└── webrtc-internals-exporter/
```
- [WebRTC Internals Exporter](https://github.com/vpalmisano/webrtc-internals-exporter)
- [WebRTC debugging with Prometheus/Grafana](https://medium.com/@vpalmisano/webrtc-debugging-with-prometheus-grafana-254b6ac71063)
- [MongoDB Docker Compose examples](https://github.com/TGITS/docker-compose-examples/tree/main/mongodb-docker-compose-examples)
@@ -3,18 +3,19 @@ services:
    container_name: selenium-standalone-chromium
    image: selenium/standalone-chromium:129.0
    volumes:
      - ./webrtc-internals-exporter:/tmp/webrtc-internals-exporter:ro
      - build-extension:/tmp/webrtc-internals-exporter
    shm_size: "2g"
    attach: false
    depends_on:
      telegraf:
        condition: service_healthy
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:4444/wd/hub/status"]
      interval: 5s
      timeout: 10s
      retries: 5
    network_mode: host
    pull_policy: always
    ports:
      - "7900:7900"
    networks:
      - backend

  telegraf:
    container_name: telegraf
@@ -25,11 +26,32 @@ services:
      - DATABASE=${TELEGRAF_MONGODB_DATABASE:?"Database name is required"}
      - DSN=${TELEGRAF_MONGODB_DSN:?"DSN is required"}
      - HOSTNAME=${TELEGRAF_HOSTNAME:?"Hostname is required"}
      - SOCKET_PORT=${SOCKET_PORT:?"Socket port is required"}
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8080"]
      interval: 5s
      timeout: 10s
      retries: 5
    pull_policy: always
    networks:
      - backend
  build-extension:
    container_name: build-extension
    image: node:22.14.0-bookworm-slim
    volumes:
      - ./webrtc-internals-exporter:/tmp/webrtc-internals-exporter:ro
      - build-extension:/tmp/webrtc-internals-exporter-build
    working_dir: /tmp/webrtc-internals-exporter-build/webpack
    command:
      - /bin/bash
      - -c
      - |
        cp -r /tmp/webrtc-internals-exporter/* /tmp/webrtc-internals-exporter-build
        npm install
        npm run build
    environment:
      - WEBRTC_INTERNALS_EXPORTER_URL=http://collector
    pull_policy: always
    networks:
      - backend

@@ -44,12 +66,14 @@ services:
        condition: service_healthy
      telegraf:
        condition: service_healthy
      build-extension:
        condition: service_completed_successfully
    environment:
      - VIDEO_URL=${VIDEO_URL:?"Video URL is required"}
    ports:
      - "9092:9092"
    extra_hosts:
      - "host.docker.internal:host-gateway"
      - SOCKET_URL=telegraf
      - HUB_URL=http://selenium:4444
      - WEBRTC_INTERNALS_PATH=/tmp/webrtc-internals-exporter
    pull_policy: always
    networks:
      - backend

@@ -57,4 +81,7 @@ networks:
  backend:
    ipam:
      config:
        - subnet: 172.30.0.0/16
        - subnet: 172.100.0.0/16

volumes:
  build-extension:
211  main.py
@@ -1,13 +1,14 @@
import signal
import json
import time
import socket
import logging
import os
import argparse
from time import sleep
from functools import partial
from http.server import HTTPServer
from utils.PostHandler import Handler
from utils.ColoredFormatter import ColoredFormatter
from utils.Convenience import *
from bs4 import BeautifulSoup as bs
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
@@ -16,11 +17,20 @@ from selenium.webdriver import ActionChains
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as ec

# Plugin system imports
import importlib
import importlib.util
import inspect
import glob
import sys  # Import the sys module
from utils.plugins_base import StatsSetupPlugin, StatsDownloadPlugin

logger = logging.getLogger(__name__)
args = None

def setupLogger():
    logging_format = "[%(asctime)s] (%(levelname)s) %(module)s - %(funcName)s: %(message)s"
    logging.basicConfig(level=logging.INFO, format=logging_format)
    logging.basicConfig(level=firstValid(args.log_level, os.getenv('LOG_LEVEL'), default='INFO'), format=logging_format) # type: ignore
    (logger := logging.getLogger(__name__)).setLevel(logging.INFO)
    logger.propagate = False
    (logger_handler := logging.StreamHandler()).setFormatter(
@@ -28,43 +38,85 @@ def setupLogger():
    )
    logger.addHandler(logger_handler)


def setupArgParser():
    parser = argparse.ArgumentParser(description='Collector for PeerTube stats.')
    parser.add_argument('-u', '--url', type=str, help='URL of the video to collect stats for.')
    parser.add_argument('--socket-url', type=str, help='URL of the socket to send the stats to. Default: localhost')
    parser.add_argument('--socket-port', type=int, help='Port of the socket to send the stats to. Default: 8094')
    parser.add_argument('--hub-url', type=str, help='URL of the Selenium hub to connect to. If not provided, local Chrome driver will be used.')
    parser.add_argument('--webrtc-internals-path', type=str, help='Path to the WebRTC internals extension.')
    parser.add_argument('--log-level', type=str, help='Log level to use. Default: INFO')
    parser.add_argument('--plugin-dir', type=str, help='Path to the plugin directory.')

    return parser

def interrupt_handler(signum, driver: webdriver.Remote):
    logger.info(f'Handling signal {signum} ({signal.Signals(signum).name}).')

    driver.quit()
    raise SystemExit

def setupChromeDriver():
def setupChromeDriver(command_executor: str | None, webrtc_internals_path: str) -> webdriver.Remote | webdriver.Chrome:
    logger.log(logging.INFO, 'Setting up Chrome driver.')
    chrome_options = Options()
    #chrome_options.add_argument("--headless")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--mute-audio")
    chrome_options.add_argument("--window-size=1280,720")
    #chrome_options.add_argument("--disable-dev-shm-usage")
    chrome_options.add_argument("--no-default-browser-check")
    chrome_options.add_argument("--disable-features=WebRtcHideLocalIpsWithMdns")
    #chrome_options.add_argument(f"--load-extension={os.path.abspath(os.path.join(os.path.dirname(__file__), 'webrtc-internals-exporter'))}")
    chrome_options.add_argument("--load-extension=/tmp/webrtc-internals-exporter")
    chrome_options.add_argument(f"--load-extension={webrtc_internals_path}")
    chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})

    #driver = webdriver.Chrome(options=chrome_options)
    driver = webdriver.Remote(command_executor='http://host.docker.internal:4444', options=chrome_options)
    if command_executor is not None:
        driver = webdriver.Remote(command_executor=command_executor, options=chrome_options)
        logger.warning(f'Using Selenium hub at {command_executor}.')
    else:
        driver = webdriver.Chrome(options=chrome_options)
        logger.warning('No Selenium hub URL provided, using local Chrome driver.')

    logger.log(logging.INFO, 'Chrome driver setup complete.')

    return driver

def saveStats(stats: list):
    try:
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        logger.log(logging.DEBUG, f'Saving stats: {json.dumps(stats, indent=4)}')
        sock.sendto(json.dumps(stats).encode(), ('telegraf', 8094))
        sock.close()
        logger.log(logging.DEBUG, 'Sent stats to socket.')
    except socket.error as e:
        logger.error(f'Got socket error: {e}')
def convert_to_bytes(down, downUnit):
    return float(down) * (1000 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[downUnit])

def downloadStats(driver: webdriver.Chrome, peersDict: dict):
# Default Plugin Implementations
class DefaultStatsSetupPlugin(StatsSetupPlugin):
    def setup_stats(self, driver: webdriver.Remote, url: str, retries: int = 5) -> webdriver.Remote:
        logger.log(logging.INFO, 'Setting up stats.')
        actions = ActionChains(driver)
        wait = WebDriverWait(driver, 30, poll_frequency=0.2)

        sleep(2)

        for attempt in range(retries):
            driver.get(url)
            try:
                wait.until(ec.presence_of_element_located((By.CLASS_NAME, 'vjs-big-play-button')))
                break
            except Exception:
                logger.error(f'Timeout while waiting for the big play button to be present. Attempt {attempt + 1} of {retries}')
                if attempt == retries - 1:
                    logger.error('Timeout limit reached. Exiting.')
                    driver.quit()
                    raise SystemExit(1)

        actions.click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
        wait.until(ec.visibility_of_element_located((By.CLASS_NAME, 'vjs-control-bar')))
        actions.context_click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
        statsForNerds = driver.find_elements(By.CLASS_NAME ,'vjs-menu-item')
        actions.click(statsForNerds[-1]).perform()
        wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, 'div.vjs-stats-content[style="display: block;"]')))
        actions.move_to_element(driver.find_element(By.CLASS_NAME ,'vjs-control-bar')).perform()
        logger.log(logging.INFO, 'Stats setup complete.')

        return driver

class DefaultStatsDownloadPlugin(StatsDownloadPlugin):
    def download_stats(self, driver: webdriver.Remote, peersDict: dict, socket_url: str, socket_port: int):
        html = driver.find_element(By.CLASS_NAME ,'vjs-stats-list').get_attribute('innerHTML')
        if html is not None:
            htmlBS = bs(html, 'html.parser')
@@ -74,7 +126,7 @@ def downloadStats(driver: webdriver.Chrome, peersDict: dict):
            stats = htmlBS.find_all('div', attrs={'style': 'display: block;'})

            playerStats = {
                stat.div.text: stat.span.text.replace('\u21d3', 'down').replace('down/', 'down /').replace('\u21d1 ', 'up').replace('\u21d1', 'up').replace('\u00b7', '-').strip()
                stat.div.text: stat.span.text.replace('\u21d3', 'down').replace('down/', 'down /').replace('\u21d1 ', 'up').replace('\u21d1', 'up').replace('\u00b7', '-').strip() # type: ignore
                for stat in stats
            }
@@ -155,45 +207,114 @@ def downloadStats(driver: webdriver.Chrome, peersDict: dict):
            'session': driver.session_id
        }

        saveStats([stats])
        super().saveStats([stats], socket_url, socket_port)

def convert_to_bytes(down, downUnit):
    return float(down) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[downUnit])
# Plugin loading mechanism
def load_plugins(plugin_dir: str) -> tuple[StatsSetupPlugin | None, StatsDownloadPlugin | None]:
    """
    Loads plugins from the specified directory.

def setupStats(driver: webdriver.Remote, url: str):
    logger.log(logging.INFO, 'Setting up stats.')
    actions = ActionChains(driver)
    wait = WebDriverWait(driver, 30, poll_frequency=0.2)
    Args:
        plugin_dir: The directory to search for plugins.

    driver.get(url)
    Returns:
        A tuple containing the loaded StatsSetupPlugin and StatsDownloadPlugin, or (None, None) if no plugins were found.
    """

    wait.until(ec.presence_of_element_located((By.CLASS_NAME, 'vjs-big-play-button')))
    actions.click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
    wait.until(ec.visibility_of_element_located((By.CLASS_NAME, 'vjs-control-bar')))
    actions.context_click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
    statsForNerds = driver.find_elements(By.CLASS_NAME ,'vjs-menu-item')
    actions.click(statsForNerds[-1]).perform()
    wait.until(ec.text_to_be_present_in_element((By.CLASS_NAME, 'vjs-stats-list'), 'Player'))
    actions.move_to_element(driver.find_element(By.CLASS_NAME ,'vjs-control-bar')).perform()
    logger.log(logging.INFO, 'Stats setup complete.')
    logger.info(f"Loading plugins from {plugin_dir}")

    return driver
    setup_plugin = None
    download_plugin = None

    plugin_files = glob.glob(os.path.join(plugin_dir, "*.py"))

    # Log the contents of the plugin directory
    logger.debug(f"Plugin directory contents: {os.listdir(plugin_dir)}")

    for plugin_file in plugin_files:
        module_name = os.path.basename(plugin_file)[:-3]  # Remove .py extension
        logger.debug(f"Loading plugin file {plugin_file}")
        try:
            spec = importlib.util.spec_from_file_location(module_name, plugin_file)
            logger.debug(f"Spec: {spec}")
            if spec is None:
                logger.warning(f"Can't load plugin file {plugin_file}")
                continue
            module = importlib.util.module_from_spec(spec)
            logger.debug(f"Module: {module}")
            if spec.loader is not None:
                spec.loader.exec_module(module)
            else:
                logger.warning(f"Can't load module {module_name} from {plugin_file}")

            for name, obj in inspect.getmembers(module):
                logger.debug(f"Found member: {name} in module {module_name}")
                if inspect.isclass(obj):
                    if issubclass(obj, StatsSetupPlugin) and obj is not StatsSetupPlugin:
                        logger.info(f"Found StatsSetupPlugin: {obj.__name__}")
                        setup_plugin = obj()
                        logger.debug(f"Loaded StatsSetupPlugin: {obj.__name__} from {plugin_file}")
                    elif issubclass(obj, StatsDownloadPlugin) and obj is not StatsDownloadPlugin:
                        logger.info(f"Found StatsDownloadPlugin: {obj.__name__}")
                        download_plugin = obj()
                        logger.debug(f"Loaded StatsDownloadPlugin: {obj.__name__} from {plugin_file}")
                    else:
                        logger.debug(f"Class {obj.__name__} is not a subclass of StatsSetupPlugin or StatsDownloadPlugin")
                else:
                    logger.debug(f"{name} is not a class")
        except Exception as e:
            logger.warning(f"Error loading plugin {plugin_file}: {e}")

    return setup_plugin, download_plugin

if __name__ == '__main__':
    args = setupArgParser().parse_args()

    setupLogger()

    driver = setupChromeDriver()
    # Load plugins
    plugin_dir = firstValid(args.plugin_dir, os.getenv('PLUGIN_DIR'), default=None)
    if plugin_dir is None:
        logger.info("No plugin directory provided. Using default plugins.")
        setup_plugin = None
        download_plugin = None
    else:
        setup_plugin, download_plugin = load_plugins(plugin_dir)

    # Use default plugins if none are loaded
    if setup_plugin is None:
        setup_plugin = DefaultStatsSetupPlugin()
        logger.info("Using default StatsSetupPlugin.")
    if download_plugin is None:
        download_plugin = DefaultStatsDownloadPlugin()
        logger.info("Using default StatsDownloadPlugin.")

    command_executor = firstValid(args.hub_url, os.getenv('HUB_URL'), default=None)
    webrtc_internals_path = firstValid(
        args.webrtc_internals_path,
        os.getenv('WEBRTC_INTERNALS_PATH'),
        default=os.path.abspath(os.path.join(os.path.dirname(__file__), 'webrtc-internals-exporter'))
    )

    driver = setupChromeDriver(command_executor, webrtc_internals_path)

    signal.signal(signal.SIGINT, lambda signum, frame: interrupt_handler(signum, driver))

    url = os.getenv('VIDEO_URL')
    url = firstValid(args.url, os.getenv('VIDEO_URL'), default=None)
    if url is None:
        logger.error('VIDEO_URL environment variable is not set.')
        logger.error('VIDEO_URL environment variable or --url argument is required.')
        raise SystemExit(1)

    setupStats(driver, url)
    # Use the loaded plugin
    driver = setup_plugin.setup_stats(driver, url)

    logger.log(logging.INFO, 'Starting server collector.')
    httpd = HTTPServer(('', 9092), partial(Handler, downloadStats, driver, logger))
    logger.info('Server collector started.')
    socket_url = firstValid(args.socket_url, os.getenv('SOCKET_URL'), default='localhost')
    try:
        socket_port = int(firstValid(args.socket_port, os.getenv('SOCKET_PORT'), default=8094))
    except ValueError:
        logger.error('Invalid socket port provided. Exiting.')
        raise SystemExit(1)

    logger.info('Starting server collector.')
    httpd = HTTPServer(('', 9092), partial(Handler, download_plugin.download_stats, driver, logger, socket_url, socket_port))
    httpd.serve_forever()
55  monolith-entrypoint.sh  Normal file
@@ -0,0 +1,55 @@
#!/bin/bash

if [ -z "$TELEGRAF_HOSTNAME" ]; then
    echo "Error: TELEGRAF_HOSTNAME is not set"
    exit 1
fi

if [ -z "$TELEGRAF_MONGODB_DSN" ]; then
    echo "Error: TELEGRAF_MONGODB_DSN is not set"
    exit 1
fi

if [ -z "$TELEGRAF_MONGODB_DATABASE" ]; then
    echo "Error: TELEGRAF_MONGODB_DATABASE is not set"
    exit 1
fi

if [ -z "$SOCKET_PORT" ]; then
    echo "Error: SOCKET_PORT is not set"
    exit 1
fi

if [ -z "$VIDEO_URL" ]; then
    echo "Error: VIDEO_URL is not set"
    exit 1
fi

# Set the environment variables
export DSN=$TELEGRAF_MONGODB_DSN
export DATABASE=$TELEGRAF_MONGODB_DATABASE
export HOSTNAME=$TELEGRAF_HOSTNAME

# Start the Selenium hub
/opt/bin/entry_point.sh > /dev/null 2>&1 &

# Wait for Selenium hub to be ready
printf 'Waiting for Selenium standalone to be ready'
timeout=30
while ! curl -sSL "http://localhost:4444/wd/hub/status" 2>/dev/null | jq -e '.value.ready' | grep -q true; do
    printf '.'
    sleep 1
    ((timeout--))
    if [ $timeout -le 0 ]; then
        echo "Error: Selenium standalone did not become ready in time. Exiting..."
        exit 1
    fi
done

printf '\n'

# Start the Telegraf agent
telegraf --config ./telegraf.conf &

# Start the main Python script as PID 1
exec ./venv/bin/python main.py
31  plugins/example_plugin.py  Normal file
@@ -0,0 +1,31 @@
import logging
from selenium import webdriver
from selenium.webdriver.remote.webdriver import WebDriver as Remote
from utils.plugins_base import StatsSetupPlugin, StatsDownloadPlugin

logger = logging.getLogger(__name__)

class ExampleStatsSetupPlugin(StatsSetupPlugin):
    def setup_stats(self, driver: webdriver.Chrome, url: str, retries: int = 5) -> webdriver.Chrome:
        logger.info("Running ExampleStatsSetupPlugin...")
        # Here you would implement the custom logic to setup stats
        # For example, you could click on a button to display stats.
        # You could also wait for an element to appear before continuing.
        # This is just an example

        driver.get(url)

        return driver

class ExampleStatsDownloadPlugin(StatsDownloadPlugin):
    def download_stats(self, driver: webdriver.Chrome, peersDict: dict, socket_url: str, socket_port: int):
        logger.info("Running ExampleStatsDownloadPlugin...")
        stats = {'message': 'Hello from ExampleStatsDownloadPlugin'}
        # Here you would implement the custom logic to download stats
        # and send them to the socket.
        # This is just an example

        print(f"Sending stats: {stats} to {socket_url}:{socket_port}")

        # Remember to call the saveStats method to send the stats to the socket
        super().saveStats([stats], socket_url, socket_port)
@@ -1,2 +1,3 @@
selenium
beautifulsoup4
yaspin
29  selenium-standalone-stack/README.md  Normal file
@@ -0,0 +1,29 @@
# Selenium standalone grid deployment script

## Cloud provider

This script uses the services of Hetzner.

It should be easy to modify to use other cloud providers.

## Dependencies

You need to install `jq`, `nmap` and `hcloud`, the Hetzner cloud API CLI.

On Debian:
```bash
apt install jq nmap hcloud-cli
```

## Usage

Just read the help provided by the script:

```bash
./create-selenium-stack.sh -h
```

To remove all servers in the context:
```bash
./create-selenium-stack.sh -d -y
```
288  selenium-standalone-stack/create-selenium-stack.sh  Normal file
@@ -0,0 +1,288 @@
#!/bin/bash

set -m # Enable Job Control

trap 'kill $(jobs -p)' SIGINT

# Reset
NC='\033[0m'       # Text Reset

# Regular Colors
Red='\033[0;31m'   # Red
Green='\033[0;32m' # Green
Cyan='\033[0;36m'  # Cyan

if [[ -z $(which hcloud) ]]; then
    echo -e "${Red}hcloud could not be found in \$PATH!${NC}

Please put hcloud in \$PATH ($PATH),
install it with your package manager
or go to https://github.com/hetznercloud/cli/releases to download it."
    exit 1
fi

if [[ -z $(which jq) ]]; then
    echo -e "${Red}jq could not be found in \$PATH!${NC}

Please install jq to use this script."
    exit 1
fi

if [[ -z $(which nmap) ]]; then
    echo -e "${Red}nmap could not be found in \$PATH!${NC}

Please install nmap to use this script."
    exit 1
fi

usage() {
    if hcloud context list | grep -q -v "ACTIVE"; then
        types=$(hcloud server-type list -o columns=name,cores,cpu_type,memory,storage_type,architecture | grep -v arm | sed -e 's/^/    /')
        keys=$(hcloud ssh-key list -o columns=name,fingerprint,age | sed -e 's/^/    /')
        contexts="  Available contexts:
$(hcloud context list | sed -e 's/^/    /')"
    else
        types="No hcloud context, can’t get server types"
        keys="No hcloud context, can’t get SSH keys"
        contexts="No hcloud context available.
  You can create one with the following command:
    hcloud create context name_of_the_context
  Or let this script create one during execution."
    fi

    cat << EOF
$(basename "$0") (c) Framasoft 2023, WTFPL

USAGE
    $(basename "$0") [-h] [-d] [-s <int>] [-n <int>] [-t <vps type>] [-c <hcloud context>] -k <ssh-key>

OPTIONS
    -h                  Print this help and exit
    -d                  Delete all servers
    -dy                 Delete all servers without confirmation
    -s <int>            How many VPS you want to start.
                        Default: 1
                        Maximum should be: limit (hcloud).
                        Default: 1
    -n <int>            How many nodes you want to start on each VPS.
                        Default: 1
    -t <vps type>       The type of VPS to start.
                        Default: cpx21.
                        See below
    -c <hcloud context> Name of the hcloud context
                        Default: selenium-peertube.
                        See below
    -k <ssh-key>        The ssh key used to connect to the VPS.
                        MANDATORY, no default.
                        See below.
    -e <string>         The path to the environment file to be copied and used on the VPS.
                        Default: .env

$types

HCLOUD CONTEXT
    It’s the name of the project you want to create your VPS in.

$contexts

SSH KEYS
    You must have a ssh key registered on Hetzner to use this script.
    To create a key:
        hcloud ssh-key create --name my-key --public-key-from-file ~/.ssh/id_ed25519.pub

    The ssh keys currently registered on Hetzner are:
$keys
EOF
    exit "$1"
}

delete_server() {
    echo -e "${Cyan}$(hcloud server delete "$1")${NC}"
}

create_nodes_server() {
    i="$1"
    TYPE="$2"
    KEY="$3"
    REGION="$4"
    SERVER_NAME="$REGION-node-$i"
    hcloud server create --start-after-create --name "$SERVER_NAME" --image debian-12 --type "$TYPE" --location "$REGION" --ssh-key "$KEY" > /dev/null
    echo -e "${Cyan}VPS n°$i created and started${NC}"
}

start_nodes() {
    i="$1"
    REGION=$(hcloud server list -o json | jq -r '.[] | select(.name | contains("node-'$i'")) | .datacenter.location.name')
    SERVER_NAME="$REGION-node-$i"
    SERVER_IP=$(hcloud server ip "$SERVER_NAME")
    while [[ $(nmap -p 22 "$SERVER_IP" | grep -c open) -eq 0 ]]; do
        sleep 1
    done
    SSH_CONN="root@$SERVER_IP"
    scp -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" start-nodes.sh "${SSH_CONN}:" > /dev/null
    scp -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" "$ENV_FILE" "${SSH_CONN}:" > /dev/null
    ssh -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" "$SSH_CONN" "/root/start-nodes.sh -n \"$NODES\"" > /dev/null
    echo -e "${Cyan}Nodes created on VPS n°${i}${NC}"
}

CONTEXT=selenium-peertube
SERVERS=1
NODES=1
TYPE=cpx21
DELETE=0
N_STRING=node
FORCE_DELETION=0
ENV_FILE=.env

while getopts "hds:n:t:k:c:y" option; do
    case $option in
        h)
            usage 0
            ;;
        d)
            DELETE=1
            ;;
        s)
            SERVERS=$OPTARG
            ;;
        n)
            NODES=$OPTARG
            if [[ $NODES -gt 1 ]]; then
                N_STRING=nodes
            fi
            ;;
        t)
            TYPE=$OPTARG
            ;;
        k)
            KEY=$OPTARG
            ;;
        c)
            CONTEXT=$OPTARG
            ;;
        y)
            FORCE_DELETION=1
            ;;
        *)
            usage 1
            ;;
    esac
done

if [[ $(hcloud context active) != "$CONTEXT" ]]; then
    echo -e "${Cyan}Hcloud context is not '$CONTEXT'!${NC}"
    if hcloud context list | grep -q -F "$CONTEXT"; then
        echo -e "${Green}Selecting hcloud context ${CONTEXT}${NC}"
        hcloud context use "$CONTEXT"
    else
        echo -e "${Red}Hcloud context ${CONTEXT} does not exist.${NC}
${Cyan}Will now try to create the context ${CONTEXT}${NC}"
        hcloud context create "$CONTEXT"
    fi

    exit 1
fi

if [[ $DELETE -eq 1 ]]; then
    SERVERS=$(hcloud server list -o json)
    if [[ $SERVERS == 'null' ]]; then
        echo -e "${Cyan}No VPS to delete.${NC}"
        exit 0
    fi
    NAMES=$(echo "$SERVERS" | jq -r '.[] | .name' | sort -h)
    echo -e "${Red}You are about to delete the following VPS${NC}:"
    echo "$NAMES"
    if [[ $FORCE_DELETION -eq 1 ]]; then
        confirm="yes"
    else
        echo -e -n "${Cyan}Please confirm the deletion by typing '${NC}${Red}yes${NC}': "
        read -r confirm
    fi
    if [[ $confirm == 'yes' ]]; then
        for i in $NAMES; do
            echo -e "${Cyan}Starting server $i deletion${NC}"
            delete_server "$i" &
        done
        # Wait for all delete_server jobs to finish
        while true; do
            fg > /dev/null 2>&1
            [ $? == 1 ] && break
        done
        if [[ $(hcloud server list -o json) == '[]' ]]; then
            echo -e "${Green}All servers have been deleted${NC}"
        else
            echo -e "${Red}Some servers have not been deleted:${NC}"
            hcloud server list
        fi
    else
        echo "Deletion cancelled."
    fi
    exit 0
fi

if [[ -z $KEY ]]; then
    echo -e "${Red}You must choose a ssh key!${NC}\n"
    usage 1
fi

KEY_FOUND=0
for i in $(hcloud ssh-key list -o json | jq -r '.[] | .name'); do
    if [[ $i == "$KEY" ]]; then
        KEY_FOUND=1
        break
    fi
done

if [[ $KEY_FOUND -eq 0 ]]; then
    echo -e "${Red}The chosen ssh key is not registered on Hetzner!${NC}\n"
    usage 1
fi

if hcloud server list | grep -q -v NAME; then
    echo -e "${Red}There already are servers in the context! Exiting.${NC}\nList of the servers:"
    hcloud server list
    exit 1
fi

if [[ ! -f "$ENV_FILE" ]]; then
    echo -e "${Red}Environment file '$ENV_FILE' does not exist!${NC}"
    exit 1
fi

echo -e "${Green}Creating $SERVERS VPS${NC}"
REGIONS=($(hcloud location list -o json | jq -r '.[] | select(.name != "fsn1") | .name' | shuf))
for i in $(seq 1 "$SERVERS"); do
    REGION=${REGIONS[$((i % ${#REGIONS[@]}))]}
    echo -e "${Cyan}Creating VPS n°$i in $REGION"
    create_nodes_server "$i" "$TYPE" "$KEY" "$REGION" &
done

# Wait for all create_nodes_server jobs to finish
while true; do
    fg > /dev/null 2>&1
    [ $? == 1 ] && break
done

echo -e "${Green}Starting nodes on $SERVERS VPS ($NODES $N_STRING each)${NC}"
for i in $(seq 1 "$SERVERS"); do
    echo -e "${Cyan}Starting $N_STRING on VPS n°$i${NC}"
    start_nodes "$i" &
done

echo -e "${Green}Waiting for all nodes to be started${NC}"

# Wait for all start_nodes jobs to finish
while true; do
    fg > /dev/null 2>&1
    [ $? == 1 ] && break
done

echo -e "${Green}All the servers and nodes have been created and started!

Number of servers: $SERVERS
Number of nodes per server: $NODES
Type of the servers:
    nodes servers: $TYPE

You can remove all servers with the following command
    $0 -d${NC}"
126  selenium-standalone-stack/start-nodes.sh  Normal file
@@ -0,0 +1,126 @@
#!/bin/bash

usage() {
    cat << EOF
$(basename "$0") (c) Framasoft 2023, WTFPL

USAGE
    $(basename "$0") [-h] [-n <int>]

OPTIONS
    -h          print this help and exit
    -n <int>    how many selenium nodes you want to launch. Default: 1
    -e <string> the environment file path to use. Default: .env
EOF
    exit "$1"
}

NUMBER=1
ENV_FILE=".env"

while getopts "hn:e:" option; do
    case $option in
        h)
            usage 0
            ;;
        n)
            NUMBER=$OPTARG
            ;;
        e)
            ENV_FILE=$OPTARG
            ;;
        *)
            usage 1
            ;;
    esac
done

HOST=$(hostname)

DEBIAN_FRONTEND=noninteractive
export DEBIAN_FRONTEND

echo "Installing packages"
apt-get -qq -y update
apt-get -qq -y dist-upgrade
apt-get -qq -y install jq \
    tmux \
    vim \
    multitail \
    htop \
    liquidprompt \
    coreutils \
    apparmor-utils \
    docker.io \

echo "Activating liquidprompt"
liquidprompt_activate
. /usr/share/liquidprompt/liquidprompt

echo "Modifying kernel parameters"
sysctl net.ipv6.conf.default.forwarding=1
sysctl net.ipv6.conf.all.forwarding=1

echo "Configuring Docker for IPv6"
IP_ADDR=$(ip --json a show eth0 | jq '.[] | .addr_info | .[] | select(.family | contains("inet6")) | select(.scope | contains("global")) | .local' -r)
NETWORK=$(echo "$IP_ADDR" | sed -e 's@:[^:]\+$@8000::/65@')

cat << EOF > /etc/docker/daemon.json
{
    "ipv6": true,
    "fixed-cidr-v6": "$NETWORK"
}
EOF
systemctl restart docker

echo "Starting $NUMBER Selenium nodes"

for NB in $(seq 1 "$NUMBER"); do
    NODE_NAME="selenium-${HOST}-instance-${NB}"

    # Replace variables in the environment file
    TEMP_ENV_FILE=$(mktemp)
    while IFS= read -r line; do
        eval "echo \"$line\""
    done < "$ENV_FILE" > "$TEMP_ENV_FILE"
    ENV_FILE="$TEMP_ENV_FILE"

    echo "Starting Selenium node n°$NB"
    docker run --rm \
        --env-file $ENV_FILE \
        --name "$NODE_NAME" \
        --pull always \
        --shm-size="2g" \
        -d \
        kobimex/peertube-collector-monolith:latest > /dev/null 2>&1

    # Wait until the container gets an IPv6 address.
    DOCKER_IP=""
    for i in {1..10}; do
        DOCKER_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}' "$NODE_NAME")
        if [ -n "$DOCKER_IP" ]; then
            break
        fi
        sleep 1
    done

    if [ -z "$DOCKER_IP" ]; then
        echo "Error: Could not retrieve a valid IPv6 address for $NODE_NAME." >&2
        exit 1
    fi

    echo "Adding Selenium node n°$NB to neighbour proxy"
    ip -6 neighbour add proxy "$DOCKER_IP" dev eth0
    docker stop "$NODE_NAME"
    sleep 1

    docker run --rm \
        --env-file $ENV_FILE \
        --name "$NODE_NAME" \
        --pull always \
        --shm-size="2g" \
        -d \
        -p 790$NB:790$NB \
        -e "SE_NO_VNC_PORT=790$NB" \
        kobimex/peertube-collector-monolith:latest > /dev/null 2>&1
done
5  server/.env.example  Normal file
@@ -0,0 +1,5 @@
# MongoDB Environment
MONGO_INITDB_ROOT_USERNAME=root
MONGO_INITDB_ROOT_PASSWORD=password
MONGO_EXPRESS_USERNAME=admin
MONGO_EXPRESS_PASSWORD=password
30  server/README.md  Normal file
@@ -0,0 +1,30 @@
# Server

The repository contains a `server` directory with a simple MongoDB server (with initialization scripts) and a WebUI that serves the WebRTC stats collected by the collector.

It's not mandatory to run this service; it's provided as an example of how to store the collected data.

## Setup

1. Change to the `server` directory:
   ```sh
   cd server
   ```

2. Create and configure the environment file based on the `.env.example` file:
   ```sh
   cp .env.example .env
   ```

3. Start the Docker containers:
   ```sh
   docker compose up
   ```

The WebUI control panel will be available at [http://localhost:8081](http://localhost:8081).

# Credits

- [WebRTC Internals Exporter](https://github.com/vpalmisano/webrtc-internals-exporter)
- [WebRTC debugging with Prometheus/Grafana](https://medium.com/@vpalmisano/webrtc-debugging-with-prometheus-grafana-254b6ac71063)
- [MongoDB Docker Compose examples](https://github.com/TGITS/docker-compose-examples/tree/main/mongodb-docker-compose-examples)
49  server/docker-compose.yml  Normal file
@@ -0,0 +1,49 @@
services:
  mongodb:
    image: mongo:latest
    container_name: mongodb
    hostname: mongodb
    volumes:
      - ./mongodb/initdb.d/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
      - mongodb-data:/data/db/
      - mongodb-log:/var/log/mongodb/
    env_file:
      - .env
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_INITDB_ROOT_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD}
    ports:
      - "27017:27017"
    networks:
      - mongodb_network

  mongo-express:
    image: mongo-express:latest
    container_name: mongo-express
    restart: always
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: ${MONGO_INITDB_ROOT_USERNAME}
      ME_CONFIG_MONGODB_ADMINPASSWORD: ${MONGO_INITDB_ROOT_PASSWORD}
      ME_CONFIG_MONGODB_PORT: 27017
      ME_CONFIG_MONGODB_SERVER: 'mongodb'
      ME_CONFIG_BASICAUTH_USERNAME: ${MONGO_EXPRESS_USERNAME}
      ME_CONFIG_BASICAUTH_PASSWORD: ${MONGO_EXPRESS_PASSWORD}
    ports:
      - 8081:8081
    networks:
      - mongodb_network
    depends_on:
      - mongodb

volumes:
  mongodb-data:
    driver: local
    name: mongo-data
  mongodb-log:
    driver: local
    name: mongo-log

networks:
  mongodb_network:
    driver: bridge
    name: mongo-network
33  server/mongodb/initdb.d/mongo-init.js  Normal file
@@ -0,0 +1,33 @@
db = db.getSiblingDB("statistics");

db.createRole({
  role: "statsReadWrite",
  privileges: [
    {
      resource: {
        db: "statistics",
        collection: "peertube",
      },
      actions: ["insert"],
    },
  ],
  roles: [
    {
      role: "read",
      db: "statistics",
    },
  ],
});

db.createUser({
  user: "stats_user",
  pwd: "@z^VFhN7q%vzit",
  roles: [
    {
      role: 'statsReadWrite',
      db: 'statistics',
    },
  ],
});

db.createCollection("peertube");
BIN  server/peertube data/statistics.peertube_hetzner_default_latency.json  (Stored with Git LFS)  Normal file  (Binary file not shown)
BIN  server/peertube data/statistics.peertube_hetzner_high_latency.json  (Stored with Git LFS)  Normal file  (Binary file not shown)
@@ -7,7 +7,7 @@
  dedup_interval = "600s"

[[inputs.socket_listener]]
  service_address = "udp://:8094"
  service_address = "udp://:${SOCKET_PORT}"
  data_format = "xpath_json"
  [[inputs.socket_listener.xpath]]
    metric_name = "'peertube'"
5  utils/Convenience.py  Normal file
@@ -0,0 +1,5 @@
def firstValid(*args, default):
    for arg in args:
        if arg is not None:
            return arg
    return default
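For reference, `firstValid` is the helper `main.py` uses to resolve every setting with the precedence CLI argument, then environment variable, then hard-coded default. A minimal, self-contained sketch of that pattern (the helper is inlined here, and the `SOCKET_PORT`/8094 values are illustrative assumptions, not part of this changeset):

```python
import os

# Same helper as utils/Convenience.py above, inlined so the sketch runs on its own.
def firstValid(*args, default):
    for arg in args:
        if arg is not None:
            return arg
    return default

cli_socket_port = None  # stands in for args.socket_port when the flag is omitted (hypothetical value)
socket_port = int(firstValid(cli_socket_port, os.getenv("SOCKET_PORT"), default=8094))
print(socket_port)  # prints 8094 unless SOCKET_PORT is set in the environment
```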
@@ -3,10 +3,12 @@ import logging
from http.server import BaseHTTPRequestHandler

class Handler(BaseHTTPRequestHandler):
    def __init__(self, custom_func, driver, logger, *args, **kwargs):
    def __init__(self, custom_func, driver, logger, socket_url, socket_port, *args, **kwargs):
        self._custom_func = custom_func
        self.logger = logger
        self.driver = driver
        self._socket_url = socket_url
        self._socket_port = socket_port
        super().__init__(*args, **kwargs)

    def do_POST(self):
@@ -14,7 +16,7 @@ class Handler(BaseHTTPRequestHandler):
        content_length = int(self.headers['Content-Length'])
        post_data = self.rfile.read(content_length)
        self.logger.log(logging.DEBUG, f"POST request,\nPath: {self.path}\nHeaders:\n{self.headers}\n\nBody:\n{post_data.decode('utf-8')}")
        self._custom_func(self.driver, json.loads(post_data.decode('utf-8')))
        self._custom_func(self.driver, json.loads(post_data.decode('utf-8')), self._socket_url, self._socket_port)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'POST request received')
@@ -24,6 +26,11 @@ class Handler(BaseHTTPRequestHandler):
        self.wfile.write(b'404 Not Found')

    def do_GET(self):
        if self.path == '/heartbeat':
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'Heartbeat OK')
        else:
            self.send_response(404)
            self.end_headers()
            self.wfile.write(b'404 Not Found')
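Because BaseHTTPRequestHandler is instantiated by the server for every request, the extra constructor arguments introduced here are typically pre-bound with functools.partial. A minimal wiring sketch; the callback, addresses, and ports are placeholders, not taken from this diff:

import logging
from functools import partial
from http.server import HTTPServer

def on_peers_update(driver, peers, socket_url, socket_port):
    # Hypothetical callback; the real custom_func is supplied elsewhere in the project.
    pass

logger = logging.getLogger("handler")
driver = None  # placeholder for the Selenium driver instance

bound_handler = partial(Handler, on_peers_update, driver, logger, "127.0.0.1", 8094)
HTTPServer(("0.0.0.0", 9092), bound_handler).serve_forever()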
29
utils/plugins_base.py
Normal file
@@ -0,0 +1,29 @@
import abc
import json
import socket
import logging
from selenium import webdriver

logger = logging.getLogger(__name__)

# Abstract Base Classes for Plugins
class StatsSetupPlugin(abc.ABC):
    @abc.abstractmethod
    def setup_stats(self, driver: webdriver.Remote | webdriver.Chrome, url: str, retries: int = 5) -> webdriver.Remote | webdriver.Chrome:
        pass

class StatsDownloadPlugin(abc.ABC):
    @abc.abstractmethod
    def download_stats(self, driver: webdriver.Remote | webdriver.Chrome, peersDict: dict, socket_url: str, socket_port: int):
        pass

    @staticmethod
    def saveStats(stats: list, socket_url: str, socket_port: int):
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            logger.debug(f'Saving stats: {json.dumps(stats, indent=4)}')
            sock.sendto(json.dumps(stats).encode(), (socket_url, socket_port))
            sock.close()
            logger.debug('Sent stats to socket.')
        except socket.error as e:
            logger.error(f'Got socket error: {e}')
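A concrete plugin subclasses one of these bases and forwards whatever it collects through saveStats. A minimal, hypothetical example; the class name and the payload it builds are illustrative only:

from selenium import webdriver

class ExampleDownloadPlugin(StatsDownloadPlugin):
    """Hypothetical plugin: repackages the peers dictionary and ships it over UDP."""

    def download_stats(self, driver: webdriver.Remote | webdriver.Chrome, peersDict: dict, socket_url: str, socket_port: int):
        stats = [{"peers": len(peersDict)}]  # illustrative payload
        self.saveStats(stats, socket_url, socket_port)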
1
webrtc-internals-exporter/background.bundle.js
Normal file
File diff suppressed because one or more lines are too long
@@ -6,17 +6,17 @@ function log(...args) {

log("loaded");

import "/assets/pako.min.js";
import "./assets/pako.min.js";

const DEFAULT_OPTIONS = {
  url: "http://localhost:9092",
  url: process.env.WEBRTC_INTERNALS_EXPORTER_URL + ":9092",
  username: "",
  password: "",
  updateInterval: 2,
  gzip: false,
  job: "webrtc-internals-exporter",
  enabledOrigins: {},
  enabledStats: ["data-channel", "local-candidate", "remote-candidate"]
  enabledStats: ["data-channel", "local-candidate", "remote-candidate", "candidate-pair"]
};

const options = {};
@@ -40,7 +40,7 @@
    }
  ],
  "background": {
    "service_worker": "background.js",
    "service_worker": "background.bundle.js",
    "type": "module"
  },
  "web_accessible_resources": [
@@ -22,7 +22,7 @@ class WebrtcInternalExporter {
    });

    window.postMessage({ event: "webrtc-internal-exporter:ready" });
    this.collectAllStats();
    setInterval(() => this.collectAndPostAllStats(), this.updateInterval);
  }

  randomId() {
@@ -40,6 +40,9 @@ class WebrtcInternalExporter {
    pc.iceCandidateErrors = [];
    this.peerConnections.set(id, pc);
    pc.addEventListener("connectionstatechange", () => {
      log(`connectionStateChange: ${pc.connectionState}`);
      this.collectAndPostAllStats();

      if (pc.connectionState === "closed") {
        this.peerConnections.delete(id);
      }
@@ -83,24 +86,27 @@ class WebrtcInternalExporter {
  }

  async collectAndPostSingleStat(id) {
    const stats = await this.collectStats(id, this.collectAndPostSingleStat);
    const stats = await this.collectStats(id);
    if (Object.keys(stats).length === 0 || !stats) return;

    window.postMessage(
      {
        event: "webrtc-internal-exporter:peer-connection-stats",
        stats
        stats: [stats]
      },
      stats
      [stats]
    );

    log(`Single stat collected:`, [stats]);
  }

  async collectAllStats() {
  async collectAndPostAllStats() {
    const stats = [];

    for (const [id, pc] of this.peerConnections) {
    for (const [id] of this.peerConnections) {
      if (this.url && this.enabled) {
        const pcStats = await this.collectStats(id, pc);
        const pcStats = await this.collectStats(id);
        if (Object.keys(pcStats).length === 0 || !pcStats) continue;
        stats.push(pcStats);
      }
    }
@@ -108,28 +114,24 @@ class WebrtcInternalExporter {
    window.postMessage(
      {
        event: "webrtc-internal-exporter:peer-connections-stats",
        data: JSON.parse(JSON.stringify(stats)),
        data: stats
      },
      stats
    );

    log(`Stats collected:`, JSON.parse(JSON.stringify(stats)));
    log(`Stats collected:`, stats);

    setTimeout(this.collectAllStats.bind(this), this.updateInterval);
    return stats;
  }

  /**
   * @param {string} id
   * @param {RTCPeerConnection} pc
   * @param {Function} binding
   */
  async collectStats(id, pc, binding) {
    var completeStats = {};

    if (!pc) {
      pc = this.peerConnections.get(id);
  async collectStats(id) {
    var pc = this.peerConnections.get(id);
    if (!pc) return;
    }

    var completeStats = {};

    if (this.url && this.enabled) {
      try {
@@ -157,10 +159,6 @@ class WebrtcInternalExporter {

    if (pc.connectionState === "closed") {
      this.peerConnections.delete(id);
    } else {
      if (binding) {
        setTimeout(binding.bind(this), this.updateInterval, id);
      }
    }

    return completeStats;
3
webrtc-internals-exporter/webpack/babel.config.js
Normal file
@@ -0,0 +1,3 @@
module.exports = {
  shouldPrintComment: () => false
};
26
webrtc-internals-exporter/webpack/build.js
Normal file
@@ -0,0 +1,26 @@
const { execSync } = require('child_process');
const args = process.argv.slice(2);

let url = '';

args.forEach((arg, index) => {
  if (arg === '-u' || arg === '--url') {
    url = args[index + 1];
  } else if (arg === '-h' || arg === '--help') {
    console.log('Usage: npm run build -- [-u|--url <url>]');
    console.log('Options:');
    console.log('  -u, --url <url>   URL to use for the extension collector server');
    console.log('  -h, --help        Display this help message');
    process.exit(0);
  } else if (arg.startsWith('-')) {
    console.error(`Unrecognized argument: ${arg}`);
    process.exit(1);
  }
});

if (url) {
  console.log(`Building with URL: ${url}`);
  execSync(`webpack --env URL=${url}`, { stdio: 'inherit' });
} else {
  execSync('webpack', { stdio: 'inherit' });
}
9369
webrtc-internals-exporter/webpack/package-lock.json
generated
Normal file
File diff suppressed because it is too large
33
webrtc-internals-exporter/webpack/package.json
Normal file
@@ -0,0 +1,33 @@
{
  "name": "webrtc-internals-exporter",
  "version": "1.0.0",
  "private": true,
  "scripts": {
    "build": "node build.js"
  },
  "keywords": [],
  "author": "Mirko Milovanovic",
  "license": "MIT",
  "devDependencies": {
    "babel-loader": "^8.2.2",
    "clean-webpack-plugin": "^3.0.0",
    "copy-webpack-plugin": "^12.0.2",
    "dotenv": "^16.4.7",
    "file-loader": "^6.2.0",
    "html-webpack-plugin": "^5.3.1",
    "mini-css-extract-plugin": "^1.6.0",
    "postcss": "^8.2.14",
    "postcss-loader": "^5.2.0",
    "postcss-preset-env": "^10.1.4",
    "sass": "^1.32.12",
    "sass-loader": "^11.0.1",
    "serve": "^14.2.4",
    "style-loader": "^2.0.0",
    "terser-webpack-plugin": "^5.1.1",
    "ts-loader": "^9.1.2",
    "typescript": "^4.2.4",
    "webpack": "^5.38.1",
    "webpack-cli": "^4.7.2",
    "webpack-dev-server": "^5.2.0"
  }
}
34
webrtc-internals-exporter/webpack/webpack.config.js
Normal file
@@ -0,0 +1,34 @@
const path = require('path');
const { EnvironmentPlugin } = require('webpack');
const envPath = path.resolve(__dirname, '../../.env');
const envConfig = require('dotenv').config({ path: envPath }).parsed;

module.exports = (env) => {
  const url = env.URL || 'http://localhost';

  return {
    entry: '../background.js',
    target: 'web',
    mode: 'production',
    module: {
      rules: [
        {
          test: /\.js?$/,
          use: 'babel-loader',
          exclude: /node_modules/,
        },
      ],
    },
    resolve: { extensions: ['.tsx', '.ts', '.js'] },
    output: {
      filename: 'background.bundle.js',
      path: path.resolve(__dirname, '../'),
      publicPath: '',
    },
    plugins: [
      new EnvironmentPlugin({
        WEBRTC_INTERNALS_EXPORTER_URL: envConfig.WEBRTC_INTERNALS_EXPORTER_URL || url
      }),
    ],
  };
};