Compare commits
26 Commits
be0e0f8153
...
main
Author | SHA1 | Date | |
---|---|---|---|
|
8b35d3068b | ||
2b18644024 | |||
fbd87e01c5 | |||
a67a99f849 | |||
6da53a8907 | |||
7b4b922923 | |||
87e1d24a86 | |||
83480ed3a8 | |||
5ec57150b1 | |||
bd80c1c6cc | |||
9e125bf9f7 | |||
a0cf93a6be | |||
5c92020169 | |||
84479c786a | |||
5636d92474 | |||
0518d1ba48 | |||
f579000a96 | |||
752cea15d4 | |||
5be06ec11f | |||
58369fccaf | |||
4c46af63e3 | |||
755128a5c7 | |||
76edbf0624 | |||
280a013c48 | |||
a7bdf93abd | |||
7b78f54510 |
@@ -1,8 +1,13 @@
|
||||
# User defined hostname persisted across all sessions, used to keep track of the same user
|
||||
# Set to $NODE_NAME to use the hostname of the node when running a cluster with Hetzner Cloud CLI
|
||||
TELEGRAF_HOSTNAME=
|
||||
# MongoDB connection string
|
||||
TELEGRAF_MONGODB_DSN=mongodb://stats_user:%40z%5EVFhN7q%25vzit@tube.kobim.cloud:27107/?authSource=statistics
|
||||
# MongoDB database name to store the data
|
||||
TELEGRAF_MONGODB_DATABASE=statistics
|
||||
# URL of the video to be analyzed
|
||||
VIDEO_URL=https://tube.kobim.cloud/w/iN2T8PmbSb4HJTDA2rV3sg
|
||||
VIDEO_URL=https://tube.kobim.cloud/w/eT1NZibmwMy6bx6N2YGLwr
|
||||
# Selenium Grid Hub URL
|
||||
#HUB_URL=http://localhost:4444
|
||||
# Socket port to send and listen for incoming data
|
||||
#SOCKET_PORT=8094
|
||||
|
2
.gitattributes
vendored
Normal file
2
.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
server/peertube[[:space:]]data/statistics.peertube_hetzner_default_latency.json filter=lfs diff=lfs merge=lfs -text
|
||||
server/peertube[[:space:]]data/statistics.peertube_hetzner_high_latency.json filter=lfs diff=lfs merge=lfs -text
|
14
.github/actions/setup-docker-environment/action.yml
vendored
Normal file
14
.github/actions/setup-docker-environment/action.yml
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
name: "Setup Docker Environment"
|
||||
description: "Common steps for setting up Docker build environment (checkout, QEMU, and Buildx)"
|
||||
|
||||
runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4.2.2
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3.4.0
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.9.0
|
37
.github/workflows/feature-pr-build.yml
vendored
Normal file
37
.github/workflows/feature-pr-build.yml
vendored
Normal file
@@ -0,0 +1,37 @@
|
||||
name: Build Docker Images for Pull Request
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout repository actions
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Docker Environment
|
||||
uses: ./.github/actions/setup-docker-environment
|
||||
|
||||
- name: Build Docker image
|
||||
uses: docker/build-push-action@v6.13.0
|
||||
with:
|
||||
context: .
|
||||
push: false
|
||||
tags: ${{ github.repository_owner }}/${{ github.event.repository.name }}:${{ github.event.pull_request.number }}
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64
|
||||
|
||||
- name: Build monolith Docker image
|
||||
uses: docker/build-push-action@v6.13.0
|
||||
with:
|
||||
context: .
|
||||
tags: ${{ github.repository_owner }}/${{ github.event.repository.name }}:${{ github.event.pull_request.number }}-monolith
|
||||
file: ./Monolith.dockerfile
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64
|
36
.github/workflows/main.yml
vendored
Normal file
36
.github/workflows/main.yml
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
REGISTRY_URL: gitea.kobim.cloud
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository actions
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Docker Environment
|
||||
uses: ./.github/actions/setup-docker-environment
|
||||
|
||||
- name: Log in to Docker registry
|
||||
uses: docker/login-action@v3.3.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY_URL }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v6.13.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ env.REGISTRY_URL }}/${{ github.repository_owner }}/${{ github.event.repository.name }}:latest
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64
|
55
.github/workflows/monolith.yml
vendored
Normal file
55
.github/workflows/monolith.yml
vendored
Normal file
@@ -0,0 +1,55 @@
|
||||
name: Build and Push Docker Image
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
|
||||
env:
|
||||
REGISTRY_URL: gitea.kobim.cloud
|
||||
DOCKERHUB_USERNAME: kobimex
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository actions
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Setup Docker Environment
|
||||
uses: ./.github/actions/setup-docker-environment
|
||||
|
||||
- name: Log in to custom Docker registry
|
||||
uses: docker/login-action@v3.3.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY_URL }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.REGISTRY_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image to custom registry
|
||||
uses: docker/build-push-action@v6.13.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ env.REGISTRY_URL }}/${{ github.repository_owner }}/${{ github.event.repository.name }}-monolith:latest
|
||||
file: ./Monolith.dockerfile
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@v3.3.0
|
||||
with:
|
||||
username: ${{ env.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Build and push Docker image to Docker Hub
|
||||
uses: docker/build-push-action@v6.13.0
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
tags: ${{ env.DOCKERHUB_USERNAME }}/${{ github.event.repository.name }}-monolith:latest
|
||||
file: ./Monolith.dockerfile
|
||||
platforms: |
|
||||
linux/amd64
|
||||
linux/arm64
|
11
.gitignore
vendored
11
.gitignore
vendored
@@ -262,6 +262,7 @@ TSWLatexianTemp*
|
||||
|
||||
# gummi
|
||||
.*.swp
|
||||
*.swp
|
||||
|
||||
# KBibTeX
|
||||
*~[0-9]*
|
||||
@@ -293,7 +294,15 @@ TSWLatexianTemp*
|
||||
.ipynb_checkpoints/
|
||||
env/
|
||||
.env
|
||||
.env.hetzner
|
||||
__pycache__/
|
||||
test/
|
||||
venv/
|
||||
.venv/
|
||||
.venv/
|
||||
|
||||
# Node.js
|
||||
node_modules/
|
||||
npm-debug.log
|
||||
yarn-error.log
|
||||
yarn-debug.log*
|
||||
background.bundle.js
|
18
Dockerfile
18
Dockerfile
@@ -1,8 +1,15 @@
|
||||
FROM python:3.13.1-slim-bookworm
|
||||
FROM debian:bookworm-slim
|
||||
|
||||
# Install dependencies
|
||||
# Install Python and curl
|
||||
RUN apt-get update && apt-get install -y python3 python3-pip python3-venv curl
|
||||
|
||||
# Create and activate a virtual environment
|
||||
RUN python3 -m venv /app/venv
|
||||
ENV PATH="/app/venv/bin:$PATH"
|
||||
|
||||
# Install dependencies with venv
|
||||
COPY requirements.txt /app/requirements.txt
|
||||
RUN pip install --no-cache-dir -r /app/requirements.txt
|
||||
RUN /app/venv/bin/pip install -r /app/requirements.txt
|
||||
|
||||
# Copy the application
|
||||
COPY main.py /app
|
||||
@@ -10,4 +17,7 @@ COPY utils/ /app/utils
|
||||
WORKDIR /app
|
||||
|
||||
# Run the application
|
||||
CMD ["python", "main.py"]
|
||||
CMD ["/app/venv/bin/python", "main.py"]
|
||||
|
||||
# Healthcheck
|
||||
HEALTHCHECK --interval=5s --timeout=10s --retries=5 --start-period=5s CMD curl -f http://localhost:9092/heartbeat || exit 1
|
21
LICENSE
Normal file
21
LICENSE
Normal file
@@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Mirko Milovanovic
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
50
Monolith.dockerfile
Normal file
50
Monolith.dockerfile
Normal file
@@ -0,0 +1,50 @@
|
||||
FROM node:22.14.0-bookworm-slim AS build
|
||||
|
||||
# Copy the webrtc-internals-exporter files
|
||||
COPY webrtc-internals-exporter /tmp/webrtc-internals-exporter
|
||||
|
||||
WORKDIR /tmp/webrtc-internals-exporter/webpack
|
||||
|
||||
# Install dependencies
|
||||
RUN --mount=type=cache,target=/root/.npm \
|
||||
npm install
|
||||
|
||||
# Build the project
|
||||
RUN npm run build
|
||||
|
||||
FROM selenium/standalone-chromium:129.0
|
||||
|
||||
# Install Python-virtualenv
|
||||
RUN sudo apt-get update && sudo sudo apt-get install -y python3-venv
|
||||
|
||||
WORKDIR /tmp
|
||||
|
||||
# Install Telegraf
|
||||
RUN wget -q https://repos.influxdata.com/influxdata-archive_compat.key && \
|
||||
echo '393e8779c89ac8d958f81f942f9ad7fb82a25e133faddaf92e15b16e6ac9ce4c influxdata-archive_compat.key' | sha256sum -c && \
|
||||
cat influxdata-archive_compat.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg > /dev/null && \
|
||||
echo 'deb [signed-by=/etc/apt/trusted.gpg.d/influxdata-archive_compat.gpg] https://repos.influxdata.com/debian stable main' | sudo tee /etc/apt/sources.list.d/influxdata.list && \
|
||||
sudo apt-get update && sudo apt-get install -y telegraf
|
||||
|
||||
# Create and activate a virtual environment
|
||||
RUN python3 -m venv ./venv
|
||||
ENV PATH="/tmp/venv/bin:$PATH"
|
||||
|
||||
# Install dependencies with venv
|
||||
COPY requirements.txt ./requirements.txt
|
||||
RUN ./venv/bin/pip install -r ./requirements.txt
|
||||
|
||||
# Copy files
|
||||
COPY main.py .
|
||||
COPY utils/ ./utils
|
||||
COPY telegraf.conf ./telegraf.conf
|
||||
COPY webrtc-internals-exporter /tmp/webrtc-internals-exporter
|
||||
COPY --from=build /tmp/webrtc-internals-exporter/background.bundle.js /tmp/webrtc-internals-exporter/background.bundle.js
|
||||
COPY --chown="${SEL_UID}:${SEL_GID}" monolith-entrypoint.sh /opt/bin/collector.sh
|
||||
|
||||
# Run the entrypoint
|
||||
RUN chmod +x /opt/bin/collector.sh
|
||||
ENTRYPOINT ["/opt/bin/collector.sh"]
|
||||
|
||||
# Healthcheck
|
||||
HEALTHCHECK --interval=5s --timeout=10s --retries=5 --start-period=5s CMD curl -f http://localhost:9092/heartbeat || exit 1
|
112
README.md
112
README.md
@@ -2,63 +2,92 @@
|
||||
|
||||
peertube-collector is a project designed to collect and analyze WebRTC statistics from a Chromium browser and export them to a MongoDB service. This project includes a Docker setup for running the necessary services.
|
||||
|
||||
## Working Project Structure
|
||||
|
||||
```
|
||||
peertube-collector/
|
||||
├── requirements.txt
|
||||
├── telegraf.conf
|
||||
├── docker-compose.yml
|
||||
├── Dockerfile
|
||||
├── main.py
|
||||
├── .env
|
||||
└── utils/
|
||||
└── webrtc-internals-exporter/
|
||||
```
|
||||
|
||||
|
||||
## Prerequisites
|
||||
|
||||
**Linux** based OS with the following:
|
||||
|
||||
### Software:
|
||||
- Docker (with compose support)
|
||||
- Docker Engine Community version is required. To install Docker CE, follow the official [install instructions](https://docs.docker.com/engine/install/).
|
||||
|
||||
### Open ports on localhost/loopback interface:
|
||||
- 4444 (Selenium)
|
||||
- 7900 (Selenium VNC - Optional)
|
||||
### Ports:
|
||||
|
||||
If needed, you can open these ports in `ufw` by running the following commands:
|
||||
#### External (OPTIONAL PROBABLY NOT NEEDED!!!):
|
||||
These ports are actively used by selenium and the collector services.
|
||||
|
||||
- 50000:60000/udp (WebRTC)
|
||||
- WebRTC NAT traversal requires a range of ports to be open.
|
||||
The range needs to be fairly large since the port is chosen randomly by the STUN server.
|
||||
- 27107/tcp (MongoDB)
|
||||
|
||||
Ports can be opened in the host machine's firewall with:
|
||||
```sh
|
||||
ufw allow from 172.30.0.1 to 127.0.0.1 port 4444
|
||||
ufw allow from 172.30.0.1 to 127.0.0.1 port 7900
|
||||
ufw allow 50000:60000/udp
|
||||
ufw allow 27107/tcp
|
||||
```
|
||||
|
||||
|
||||
## Setup
|
||||
## Setup with Docker Compose
|
||||
|
||||
1. Clone the repository:
|
||||
```sh
|
||||
git clone <repository-url>
|
||||
cd peertube-collector
|
||||
```
|
||||
|
||||
2. Create and configure the environment file based on the `.env.example` file:
|
||||
```sh
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
|
||||
3. Start the Docker containers:
|
||||
3. Ajust the firewall settings to allow the necessary ports if needed
|
||||
4. Start the Docker containers:
|
||||
```sh
|
||||
docker compose up
|
||||
```
|
||||
or in detached mode:
|
||||
```sh
|
||||
docker-compose up -d
|
||||
docker compose up --abort-on-container-failure
|
||||
```
|
||||
|
||||
The collector will start collecting WebRTC stats from the specified PeerTube instance.
|
||||
The collector will start gathering WebRTC stats from the Selenium container and sending them to the Telegraf service.
|
||||
|
||||
To stop the Docker containers run: `docker compose down -v`
|
||||
|
||||
### Setup with Monolithic image:
|
||||
|
||||
1. Clone the repository:
|
||||
```sh
|
||||
git clone <repository-url>
|
||||
cd peertube-collector
|
||||
```
|
||||
2. Create and configure the environment file based on the `.env.example` file:
|
||||
```sh
|
||||
cp .env.example .env
|
||||
```
|
||||
3. Ajust the firewall settings to allow the necessary ports if needed
|
||||
4. Start the Docker container:
|
||||
```sh
|
||||
docker run --rm -p 7900:7900 --env-file .env --name peertube-collector --pull always --shm-size="2g" gitea.kobim.cloud/kobim/peertube-collector-monolith:latest
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```sh
|
||||
docker run --rm -p 7900:7900 --env-file .env --name peertube-collector --pull always --shm-size="2g" kobimex/peertube-collector-monolith:latest
|
||||
```
|
||||
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Environment Variable | Service | Default Value | Description |
|
||||
| ------------------------------- | -------------------- | ------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| `TELEGRAF_HOSTNAME` | telegraf | None, **must** be set | Hostname used to identify the host/user between sessions |
|
||||
| `TELEGRAF_MONGODB_DSN` | telegraf | `mongodb://stats_user...` | DSN for the MongoDB service |
|
||||
| `TELEGRAF_MONGODB_DATABASE` | telegraf | `statistics` | Database name for the MongoDB service |
|
||||
| `VIDEO_URL` | collector | `https://tube.kobim.cloud/...` | URL for the video to be analyzed |
|
||||
| `HUB_URL` | collector | None | URL for the Selenium Hub. If not set, the local Chrome driver will be used |
|
||||
| `SOCKET_URL` | collector | `localhost` | Socket URL for Telegraf service |
|
||||
| `SOCKET_PORT` | collector & telegraf | `8094` | Socket port for Telegraf service |
|
||||
| `WEBRTC_INTERNALS_PATH` | collector | None | **Absolute** path for WebRTC internals exporter extension. When **not** set the extension path is construced relative to the current main script location. |
|
||||
| `WEBRTC_INTERNALS_EXPORTER_URL` | WebRTC extension | `http://localhost:9092` | Server URL for the WebRTC internals exporter extension |
|
||||
|
||||
Variables can be set in the `.env` file.
|
||||
An example configuration is provided in the `.env.example` file.
|
||||
|
||||
### Monitoring
|
||||
A noVNC server is available at [http://localhost:7900](http://localhost:7900/?autoconnect=1&resize=scale&password=secret) to monitor the Selenium container. The password is `secret`.
|
||||
|
||||
## Components
|
||||
|
||||
@@ -73,10 +102,21 @@ The `docker-compose.yml` file defines the following services:
|
||||
|
||||
The `Dockerfile` sets up the Python environment and installs the necessary dependencies to run the `main.py` script.
|
||||
|
||||
### Monolithic Dockerfile
|
||||
|
||||
`Monolith.dockerfile` is a single Dockerfile that combines the Selenium, Telegraf, and Collector services into a single container. This is useful for deployment in a single container environment.
|
||||
|
||||
### Main Python Script
|
||||
|
||||
The `main.py` script sets up the Selenium WebDriver, collects WebRTC stats, and sends them to the Telegraf service.
|
||||
|
||||
### WebRTC Internals Exporter
|
||||
|
||||
The `webrtc-internals-exporter` directory contains a Chromium extension that collects WebRTC stats from the browser.
|
||||
The `webrtc-internals-exporter` directory contains a Chromium extension that collects WebRTC stats from the browser.
|
||||
It uses Webpack to replace the server collector endpoint with an environment variable.
|
||||
|
||||
# Credits
|
||||
|
||||
- [WebRTC Internals Exporter](https://github.com/vpalmisano/webrtc-internals-exporter)
|
||||
- [WebRTC debugging with Prometheus/Grafana](https://medium.com/@vpalmisano/webrtc-debugging-with-prometheus-grafana-254b6ac71063)
|
||||
- [MongoDB Docker Compose examples](https://github.com/TGITS/docker-compose-examples/tree/main/mongodb-docker-compose-examples)
|
@@ -3,7 +3,7 @@ services:
|
||||
container_name: selenium-standalone-chromium
|
||||
image: selenium/standalone-chromium:129.0
|
||||
volumes:
|
||||
- ./webrtc-internals-exporter:/tmp/webrtc-internals-exporter:ro
|
||||
- build-extension:/tmp/webrtc-internals-exporter
|
||||
shm_size: "2g"
|
||||
attach: false
|
||||
healthcheck:
|
||||
@@ -11,7 +11,11 @@ services:
|
||||
interval: 5s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
network_mode: host
|
||||
pull_policy: always
|
||||
ports:
|
||||
- "7900:7900"
|
||||
networks:
|
||||
- backend
|
||||
|
||||
telegraf:
|
||||
container_name: telegraf
|
||||
@@ -22,12 +26,32 @@ services:
|
||||
- DATABASE=${TELEGRAF_MONGODB_DATABASE:?"Database name is required"}
|
||||
- DSN=${TELEGRAF_MONGODB_DSN:?"DSN is required"}
|
||||
- HOSTNAME=${TELEGRAF_HOSTNAME:?"Hostname is required"}
|
||||
- SOCKET_PORT=${SOCKET_PORT:?"Socket port is required"}
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:8080"]
|
||||
interval: 5s
|
||||
timeout: 10s
|
||||
retries: 5
|
||||
restart: unless-stopped
|
||||
pull_policy: always
|
||||
networks:
|
||||
- backend
|
||||
build-extension:
|
||||
container_name: build-extension
|
||||
image: node:22.14.0-bookworm-slim
|
||||
volumes:
|
||||
- ./webrtc-internals-exporter:/tmp/webrtc-internals-exporter:ro
|
||||
- build-extension:/tmp/webrtc-internals-exporter-build
|
||||
working_dir: /tmp/webrtc-internals-exporter-build/webpack
|
||||
command:
|
||||
- /bin/bash
|
||||
- -c
|
||||
- |
|
||||
cp -r /tmp/webrtc-internals-exporter/* /tmp/webrtc-internals-exporter-build
|
||||
npm install
|
||||
npm run build
|
||||
environment:
|
||||
- WEBRTC_INTERNALS_EXPORTER_URL=http://collector
|
||||
pull_policy: always
|
||||
networks:
|
||||
- backend
|
||||
|
||||
@@ -42,13 +66,14 @@ services:
|
||||
condition: service_healthy
|
||||
telegraf:
|
||||
condition: service_healthy
|
||||
build-extension:
|
||||
condition: service_completed_successfully
|
||||
environment:
|
||||
- VIDEO_URL=${VIDEO_URL:?"Video URL is required"}
|
||||
restart: unless-stopped
|
||||
ports:
|
||||
- "9092:9092"
|
||||
extra_hosts:
|
||||
- "host.docker.internal:host-gateway"
|
||||
- SOCKET_URL=telegraf
|
||||
- HUB_URL=http://selenium:4444
|
||||
- WEBRTC_INTERNALS_PATH=/tmp/webrtc-internals-exporter
|
||||
pull_policy: always
|
||||
networks:
|
||||
- backend
|
||||
|
||||
@@ -56,4 +81,7 @@ networks:
|
||||
backend:
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.30.0.0/16
|
||||
- subnet: 172.100.0.0/16
|
||||
|
||||
volumes:
|
||||
build-extension:
|
||||
|
394
main.py
394
main.py
@@ -1,13 +1,14 @@
|
||||
import signal
|
||||
import json
|
||||
import time
|
||||
import socket
|
||||
import logging
|
||||
import os
|
||||
import argparse
|
||||
from time import sleep
|
||||
from functools import partial
|
||||
from http.server import HTTPServer
|
||||
from utils.PostHandler import Handler
|
||||
from utils.ColoredFormatter import ColoredFormatter
|
||||
from utils.Convenience import *
|
||||
from bs4 import BeautifulSoup as bs
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.chrome.options import Options
|
||||
@@ -16,17 +17,39 @@ from selenium.webdriver import ActionChains
|
||||
from selenium.webdriver.support.wait import WebDriverWait
|
||||
from selenium.webdriver.support import expected_conditions as ec
|
||||
|
||||
# Plugin system imports
|
||||
import importlib
|
||||
import importlib.util
|
||||
import inspect
|
||||
import glob
|
||||
import sys # Import the sys module
|
||||
from utils.plugins_base import StatsSetupPlugin, StatsDownloadPlugin
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
args = None
|
||||
|
||||
def setupLogger():
|
||||
logging_format = "[%(asctime)s] (%(levelname)s) %(module)s - %(funcName)s: %(message)s"
|
||||
logging.basicConfig(level=logging.INFO, format=logging_format)
|
||||
logging.basicConfig(level=firstValid(args.log_level, os.getenv('LOG_LEVEL'), default='INFO'), format=logging_format) # type: ignore
|
||||
(logger := logging.getLogger(__name__)).setLevel(logging.INFO)
|
||||
logger.propagate = False
|
||||
(logger_handler := logging.StreamHandler()).setFormatter(
|
||||
ColoredFormatter(fmt=logging_format)
|
||||
)
|
||||
logger.addHandler(logger_handler)
|
||||
|
||||
|
||||
def setupArgParser():
|
||||
parser = argparse.ArgumentParser(description='Collector for PeerTube stats.')
|
||||
parser.add_argument('-u', '--url', type=str, help='URL of the video to collect stats for.')
|
||||
parser.add_argument('--socket-url', type=str, help='URL of the socket to send the stats to. Default: localhost')
|
||||
parser.add_argument('--socket-port', type=int, help='Port of the socket to send the stats to. Default: 8094')
|
||||
parser.add_argument('--hub-url', type=str, help='URL of the Selenium hub to connect to. If not provided, local Chrome driver will be used.')
|
||||
parser.add_argument('--webrtc-internals-path', type=str, help='Path to the WebRTC internals extension.')
|
||||
parser.add_argument('--log-level', type=str, help='Log level to use. Default: INFO')
|
||||
parser.add_argument('--plugin-dir', type=str, help='Path to the plugin directory.')
|
||||
|
||||
return parser
|
||||
|
||||
def interrupt_handler(signum, driver: webdriver.Remote):
|
||||
logger.info(f'Handling signal {signum} ({signal.Signals(signum).name}).')
|
||||
@@ -34,163 +57,264 @@ def interrupt_handler(signum, driver: webdriver.Remote):
|
||||
driver.quit()
|
||||
raise SystemExit
|
||||
|
||||
def setupChromeDriver():
|
||||
def setupChromeDriver(command_executor: str | None, webrtc_internals_path: str) -> webdriver.Remote | webdriver.Chrome:
|
||||
logger.log(logging.INFO, 'Setting up Chrome driver.')
|
||||
chrome_options = Options()
|
||||
#chrome_options.add_argument("--headless")
|
||||
chrome_options.add_argument("--no-sandbox")
|
||||
chrome_options.add_argument("--mute-audio")
|
||||
chrome_options.add_argument("--window-size=1280,720")
|
||||
#chrome_options.add_argument("--disable-dev-shm-usage")
|
||||
chrome_options.add_argument("--no-default-browser-check")
|
||||
chrome_options.add_argument("--disable-features=WebRtcHideLocalIpsWithMdns")
|
||||
#chrome_options.add_argument(f"--load-extension={os.path.abspath(os.path.join(os.path.dirname(__file__), 'webrtc-internals-exporter'))}")
|
||||
chrome_options.add_argument("--load-extension=/tmp/webrtc-internals-exporter")
|
||||
chrome_options.add_argument(f"--load-extension={webrtc_internals_path}")
|
||||
chrome_options.add_experimental_option('prefs', {'intl.accept_languages': 'en,en_US'})
|
||||
|
||||
#driver = webdriver.Chrome(options=chrome_options)
|
||||
driver = webdriver.Remote(command_executor='http://host.docker.internal:4444', options=chrome_options)
|
||||
|
||||
if command_executor is not None:
|
||||
driver = webdriver.Remote(command_executor=command_executor, options=chrome_options)
|
||||
logger.warning(f'Using Selenium hub at {command_executor}.')
|
||||
else:
|
||||
driver = webdriver.Chrome(options=chrome_options)
|
||||
logger.warning('No Selenium hub URL provided, using local Chrome driver.')
|
||||
|
||||
logger.log(logging.INFO, 'Chrome driver setup complete.')
|
||||
|
||||
return driver
|
||||
|
||||
def saveStats(stats: list):
|
||||
try:
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
|
||||
logger.log(logging.DEBUG, f'Saving stats: {json.dumps(stats, indent=4)}')
|
||||
sock.sendto(json.dumps(stats).encode(), ('telegraf', 8094))
|
||||
sock.close()
|
||||
logger.log(logging.DEBUG, 'Sent stats to socket.')
|
||||
except socket.error as e:
|
||||
logger.error(f'Got socket error: {e}')
|
||||
def convert_to_bytes(down, downUnit):
|
||||
return float(down) * (1000 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[downUnit])
|
||||
|
||||
def downloadStats(driver: webdriver.Chrome, peersDict: dict):
|
||||
html = driver.find_element(By.CLASS_NAME ,'vjs-stats-list').get_attribute('innerHTML')
|
||||
if html is not None:
|
||||
htmlBS = bs(html, 'html.parser')
|
||||
else:
|
||||
raise ValueError("html is None")
|
||||
|
||||
stats = htmlBS.find_all('div', attrs={'style': 'display: block;'})
|
||||
|
||||
playerStats = {
|
||||
stat.div.text: stat.span.text.replace('\u21d3', 'down').replace('down/', 'down /').replace('\u21d1 ', 'up').replace('\u21d1', 'up').replace('\u00b7', '-').strip()
|
||||
for stat in stats
|
||||
}
|
||||
|
||||
keys = list(playerStats.keys())
|
||||
for stat in keys:
|
||||
if 'Viewport / Frames' == stat:
|
||||
viewport, frames = playerStats[stat].split(' / ')
|
||||
width, height = viewport.split('x')
|
||||
height, devicePixelRatio = height.split('*')
|
||||
dropped, total = frames.split(' of ')[0].split()[0], frames.split(' of ')[1].split()[0]
|
||||
playerStats[stat] = {'Width': int(width), 'Height': int(height), 'Pixel ratio': float(devicePixelRatio), 'Frames': {'Dropped': int(dropped), 'Total': int(total)}}
|
||||
|
||||
if 'Codecs' == stat:
|
||||
video, audio = playerStats[stat].split(' / ')
|
||||
playerStats[stat] = {'Video': video, 'Audio': audio}
|
||||
# Default Plugin Implementations
|
||||
class DefaultStatsSetupPlugin(StatsSetupPlugin):
|
||||
def setup_stats(self, driver: webdriver.Remote, url: str, retries: int = 5) -> webdriver.Remote:
|
||||
logger.log(logging.INFO, 'Setting up stats.')
|
||||
actions = ActionChains(driver)
|
||||
wait = WebDriverWait(driver, 30, poll_frequency=0.2)
|
||||
|
||||
if 'Volume' == stat:
|
||||
if ' (' in playerStats[stat]:
|
||||
volume, muted = playerStats[stat].split(' (')
|
||||
playerStats[stat] = {'Volume': int(volume), 'Muted': 'muted' in muted}
|
||||
sleep(2)
|
||||
|
||||
for attempt in range(retries):
|
||||
driver.get(url)
|
||||
try:
|
||||
wait.until(ec.presence_of_element_located((By.CLASS_NAME, 'vjs-big-play-button')))
|
||||
break
|
||||
except Exception:
|
||||
logger.error(f'Timeout while waiting for the big play button to be present. Attempt {attempt + 1} of {retries}')
|
||||
if attempt == retries - 1:
|
||||
logger.error('Timeout limit reached. Exiting.')
|
||||
driver.quit()
|
||||
raise SystemExit(1)
|
||||
|
||||
actions.click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
|
||||
wait.until(ec.visibility_of_element_located((By.CLASS_NAME, 'vjs-control-bar')))
|
||||
actions.context_click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
|
||||
statsForNerds = driver.find_elements(By.CLASS_NAME ,'vjs-menu-item')
|
||||
actions.click(statsForNerds[-1]).perform()
|
||||
wait.until(ec.presence_of_element_located((By.CSS_SELECTOR, 'div.vjs-stats-content[style="display: block;"]')))
|
||||
actions.move_to_element(driver.find_element(By.CLASS_NAME ,'vjs-control-bar')).perform()
|
||||
logger.log(logging.INFO, 'Stats setup complete.')
|
||||
|
||||
return driver
|
||||
|
||||
class DefaultStatsDownloadPlugin(StatsDownloadPlugin):
|
||||
def download_stats(self, driver: webdriver.Remote, peersDict: dict, socket_url: str, socket_port: int):
|
||||
html = driver.find_element(By.CLASS_NAME ,'vjs-stats-list').get_attribute('innerHTML')
|
||||
if html is not None:
|
||||
htmlBS = bs(html, 'html.parser')
|
||||
else:
|
||||
raise ValueError("html is None")
|
||||
|
||||
stats = htmlBS.find_all('div', attrs={'style': 'display: block;'})
|
||||
|
||||
playerStats = {
|
||||
stat.div.text: stat.span.text.replace('\u21d3', 'down').replace('down/', 'down /').replace('\u21d1 ', 'up').replace('\u21d1', 'up').replace('\u00b7', '-').strip() # type: ignore
|
||||
for stat in stats
|
||||
}
|
||||
|
||||
keys = list(playerStats.keys())
|
||||
for stat in keys:
|
||||
if 'Viewport / Frames' == stat:
|
||||
viewport, frames = playerStats[stat].split(' / ')
|
||||
width, height = viewport.split('x')
|
||||
height, devicePixelRatio = height.split('*')
|
||||
dropped, total = frames.split(' of ')[0].split()[0], frames.split(' of ')[1].split()[0]
|
||||
playerStats[stat] = {'Width': int(width), 'Height': int(height), 'Pixel ratio': float(devicePixelRatio), 'Frames': {'Dropped': int(dropped), 'Total': int(total)}}
|
||||
|
||||
if 'Codecs' == stat:
|
||||
video, audio = playerStats[stat].split(' / ')
|
||||
playerStats[stat] = {'Video': video, 'Audio': audio}
|
||||
|
||||
if 'Volume' == stat:
|
||||
if ' (' in playerStats[stat]:
|
||||
volume, muted = playerStats[stat].split(' (')
|
||||
playerStats[stat] = {'Volume': int(volume), 'Muted': 'muted' in muted}
|
||||
else:
|
||||
playerStats[stat] = {'Volume': int(playerStats[stat]), 'Muted': False}
|
||||
|
||||
if 'Connection Speed' == stat:
|
||||
speed, unit = playerStats[stat].split()
|
||||
|
||||
speedBytes = float(speed) * (1024 ** {'B/s': 0, 'KB/s': 1, 'MB/s': 2, 'GB/s': 3}[unit])
|
||||
|
||||
playerStats[stat] = {'Speed': speedBytes, 'Granularity': 's'}
|
||||
|
||||
if 'Network Activity' == stat:
|
||||
downString, upString = playerStats[stat].split(' / ')
|
||||
|
||||
down, downUnit = downString.replace('down', '').strip().split()
|
||||
up, upUnit = upString.replace('up', '').strip().split()
|
||||
|
||||
downBytes = convert_to_bytes(down, downUnit)
|
||||
upBytes = convert_to_bytes(up, upUnit)
|
||||
|
||||
playerStats[stat] = {'Down': downBytes, 'Up': upBytes}
|
||||
|
||||
if 'Total Transfered' == stat:
|
||||
downString, upString = playerStats[stat].split(' / ')
|
||||
|
||||
down, downUnit = downString.replace('down', '').strip().split()
|
||||
up, upUnit = upString.replace('up', '').strip().split()
|
||||
|
||||
downBytes = convert_to_bytes(down, downUnit)
|
||||
upBytes = convert_to_bytes(up, upUnit)
|
||||
|
||||
playerStats[stat] = {'Down': downBytes, 'Up': upBytes}
|
||||
|
||||
if 'Download Breakdown' == stat:
|
||||
server, peer = playerStats[stat].split(' - ')
|
||||
|
||||
server, serverUnit = server.replace('from servers', '').strip().split()
|
||||
peer, peerUnit = peer.replace('from peers', '').strip().split()
|
||||
|
||||
serverBytes = convert_to_bytes(server, serverUnit)
|
||||
peerBytes = convert_to_bytes(peer, peerUnit)
|
||||
|
||||
playerStats[stat] = {'Server': serverBytes, 'Peers': peerBytes}
|
||||
|
||||
if 'Buffer State' == stat:
|
||||
del(playerStats[stat])
|
||||
|
||||
if 'Live Latency' == stat:
|
||||
latency, edge = playerStats[stat].split(' (from edge: ')
|
||||
latency = sum(int(x) * 60 ** i for i, x in enumerate(reversed([part for part in latency.replace('s', '').split('m') if part])))
|
||||
edge = sum(int(x) * 60 ** i for i, x in enumerate(reversed([part for part in edge.replace('s', '').replace(')', '').split('m') if part])))
|
||||
playerStats[stat] = {'Latency': latency, 'Edge': edge}
|
||||
|
||||
stats = {
|
||||
'player': playerStats,
|
||||
'peers': peersDict,
|
||||
'url': driver.current_url,
|
||||
'timestamp': int(time.time() * 1000),
|
||||
'session': driver.session_id
|
||||
}
|
||||
|
||||
super().saveStats([stats], socket_url, socket_port)
|
||||
|
||||
# Plugin loading mechanism
|
||||
def load_plugins(plugin_dir: str) -> tuple[StatsSetupPlugin | None, StatsDownloadPlugin | None]:
|
||||
"""
|
||||
Loads plugins from the specified directory.
|
||||
|
||||
Args:
|
||||
plugin_dir: The directory to search for plugins.
|
||||
|
||||
Returns:
|
||||
A tuple containing the loaded StatsSetupPlugin and StatsDownloadPlugin, or (None, None) if no plugins were found.
|
||||
"""
|
||||
|
||||
logger.info(f"Loading plugins from {plugin_dir}")
|
||||
|
||||
setup_plugin = None
|
||||
download_plugin = None
|
||||
|
||||
plugin_files = glob.glob(os.path.join(plugin_dir, "*.py"))
|
||||
|
||||
# Log the contents of the plugin directory
|
||||
logger.debug(f"Plugin directory contents: {os.listdir(plugin_dir)}")
|
||||
|
||||
for plugin_file in plugin_files:
|
||||
module_name = os.path.basename(plugin_file)[:-3] # Remove .py extension
|
||||
logger.debug(f"Loading plugin file {plugin_file}")
|
||||
try:
|
||||
spec = importlib.util.spec_from_file_location(module_name, plugin_file)
|
||||
logger.debug(f"Spec: {spec}")
|
||||
if spec is None:
|
||||
logger.warning(f"Can't load plugin file {plugin_file}")
|
||||
continue
|
||||
module = importlib.util.module_from_spec(spec)
|
||||
logger.debug(f"Module: {module}")
|
||||
if spec.loader is not None:
|
||||
spec.loader.exec_module(module)
|
||||
else:
|
||||
playerStats[stat] = {'Volume': int(playerStats[stat]), 'Muted': False}
|
||||
logger.warning(f"Can't load module {module_name} from {plugin_file}")
|
||||
|
||||
if 'Connection Speed' == stat:
|
||||
speed, unit = playerStats[stat].split()
|
||||
|
||||
speedBytes = int(speed) * (1024 ** {'B/s': 0, 'KB/s': 1, 'MB/s': 2, 'GB/s': 3}[unit])
|
||||
|
||||
playerStats[stat] = {'Speed': int(speedBytes), 'Granularity': 's'}
|
||||
|
||||
if 'Network Activity' == stat:
|
||||
downString, upString = playerStats[stat].split(' / ')
|
||||
|
||||
down, downUnit = downString.replace('down', '').strip().split()
|
||||
up, upUnit = upString.replace('up', '').strip().split()
|
||||
|
||||
downBytes = int(down) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[downUnit])
|
||||
upBytes = int(up) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[upUnit])
|
||||
|
||||
playerStats[stat] = {'Down': downBytes, 'Up': upBytes}
|
||||
|
||||
if 'Total Transfered' == stat:
|
||||
downString, upString = playerStats[stat].split(' / ')
|
||||
|
||||
down, downUnit = downString.replace('down', '').strip().split()
|
||||
up, upUnit = upString.replace('up', '').strip().split()
|
||||
|
||||
downBytes = int(down) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[downUnit])
|
||||
upBytes = int(up) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[upUnit])
|
||||
|
||||
playerStats[stat] = {'Down': downBytes, 'Up': upBytes}
|
||||
|
||||
if 'Download Breakdown' == stat:
|
||||
server, peer = playerStats[stat].split(' - ')
|
||||
|
||||
server, serverUnit = server.replace('from servers', '').strip().split()
|
||||
peer, peerUnit = peer.replace('from peers', '').strip().split()
|
||||
|
||||
serverBytes = int(server) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[serverUnit])
|
||||
peerBytes = int(peer) * (1024 ** {'B': 0, 'KB': 1, 'MB': 2, 'GB': 3}[peerUnit])
|
||||
|
||||
playerStats[stat] = {'Server': serverBytes, 'Peers': peerBytes}
|
||||
|
||||
if 'Buffer State' == stat:
|
||||
del(playerStats[stat])
|
||||
|
||||
if 'Live Latency' == stat:
|
||||
latency, edge = playerStats[stat].split(' (from edge: ')
|
||||
latency = sum(int(x) * 60 ** i for i, x in enumerate(reversed([part for part in latency.replace('s', '').split('m') if part])))
|
||||
edge = sum(int(x) * 60 ** i for i, x in enumerate(reversed([part for part in edge.replace('s', '').replace(')', '').split('m') if part])))
|
||||
playerStats[stat] = {'Latency': latency, 'Edge': edge}
|
||||
for name, obj in inspect.getmembers(module):
|
||||
logger.debug(f"Found member: {name} in module {module_name}")
|
||||
if inspect.isclass(obj):
|
||||
if issubclass(obj, StatsSetupPlugin) and obj is not StatsSetupPlugin:
|
||||
logger.info(f"Found StatsSetupPlugin: {obj.__name__}")
|
||||
setup_plugin = obj()
|
||||
logger.debug(f"Loaded StatsSetupPlugin: {obj.__name__} from {plugin_file}")
|
||||
elif issubclass(obj, StatsDownloadPlugin) and obj is not StatsDownloadPlugin:
|
||||
logger.info(f"Found StatsDownloadPlugin: {obj.__name__}")
|
||||
download_plugin = obj()
|
||||
logger.debug(f"Loaded StatsDownloadPlugin: {obj.__name__} from {plugin_file}")
|
||||
else:
|
||||
logger.debug(f"Class {obj.__name__} is not a subclass of StatsSetupPlugin or StatsDownloadPlugin")
|
||||
else:
|
||||
logger.debug(f"{name} is not a class")
|
||||
except Exception as e:
|
||||
logger.warning(f"Error loading plugin {plugin_file}: {e}")
|
||||
|
||||
return setup_plugin, download_plugin
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = setupArgParser().parse_args()
|
||||
|
||||
stats = {
|
||||
'player': playerStats,
|
||||
'peers': peersDict,
|
||||
'url': driver.current_url,
|
||||
'timestamp': int(time.time() * 1000),
|
||||
'session': driver.session_id
|
||||
}
|
||||
|
||||
saveStats([stats])
|
||||
|
||||
def setupStats(driver: webdriver.Remote, url: str):
|
||||
logger.log(logging.INFO, 'Setting up stats.')
|
||||
actions = ActionChains(driver)
|
||||
wait = WebDriverWait(driver, 30, poll_frequency=0.2)
|
||||
|
||||
driver.get(url)
|
||||
|
||||
wait.until(ec.presence_of_element_located((By.CLASS_NAME, 'vjs-big-play-button')))
|
||||
actions.click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
|
||||
wait.until(ec.visibility_of_element_located((By.CLASS_NAME, 'vjs-control-bar')))
|
||||
actions.context_click(driver.find_element(By.CLASS_NAME ,'video-js')).perform()
|
||||
statsForNerds = driver.find_elements(By.CLASS_NAME ,'vjs-menu-item')
|
||||
actions.click(statsForNerds[-1]).perform()
|
||||
wait.until(ec.text_to_be_present_in_element((By.CLASS_NAME, 'vjs-stats-list'), 'Player'))
|
||||
actions.move_to_element(driver.find_element(By.CLASS_NAME ,'vjs-control-bar')).perform()
|
||||
logger.log(logging.INFO, 'Stats setup complete.')
|
||||
|
||||
return driver
|
||||
|
||||
if __name__ == '__main__':
|
||||
setupLogger()
|
||||
|
||||
driver = setupChromeDriver()
|
||||
# Load plugins
|
||||
plugin_dir = firstValid(args.plugin_dir, os.getenv('PLUGIN_DIR'), default=None)
|
||||
if plugin_dir is None:
|
||||
logger.info("No plugin directory provided. Using default plugins.")
|
||||
setup_plugin = None
|
||||
download_plugin = None
|
||||
else:
|
||||
setup_plugin, download_plugin = load_plugins(plugin_dir)
|
||||
|
||||
# Use default plugins if none are loaded
|
||||
if setup_plugin is None:
|
||||
setup_plugin = DefaultStatsSetupPlugin()
|
||||
logger.info("Using default StatsSetupPlugin.")
|
||||
if download_plugin is None:
|
||||
download_plugin = DefaultStatsDownloadPlugin()
|
||||
logger.info("Using default StatsDownloadPlugin.")
|
||||
|
||||
command_executor = firstValid(args.hub_url, os.getenv('HUB_URL'), default=None)
|
||||
webrtc_internals_path = firstValid(
|
||||
args.webrtc_internals_path,
|
||||
os.getenv('WEBRTC_INTERNALS_PATH'),
|
||||
default=os.path.abspath(os.path.join(os.path.dirname(__file__), 'webrtc-internals-exporter'))
|
||||
)
|
||||
|
||||
driver = setupChromeDriver(command_executor, webrtc_internals_path)
|
||||
|
||||
signal.signal(signal.SIGINT, lambda signum, frame: interrupt_handler(signum, driver))
|
||||
|
||||
url = os.getenv('VIDEO_URL')
|
||||
url = firstValid(args.url, os.getenv('VIDEO_URL'), default=None)
|
||||
if url is None:
|
||||
logger.error('VIDEO_URL environment variable is not set.')
|
||||
logger.error('VIDEO_URL environment variable or --url argument is required.')
|
||||
raise SystemExit(1)
|
||||
|
||||
setupStats(driver, url)
|
||||
# Use the loaded plugin
|
||||
driver = setup_plugin.setup_stats(driver, url)
|
||||
|
||||
logger.log(logging.INFO, 'Starting server collector.')
|
||||
httpd = HTTPServer(('', 9092), partial(Handler, downloadStats, driver, logger))
|
||||
logger.info('Server collector started.')
|
||||
socket_url = firstValid(args.socket_url, os.getenv('SOCKET_URL'), default='localhost')
|
||||
try:
|
||||
socket_port = int(firstValid(args.socket_port, os.getenv('SOCKET_PORT'), default=8094))
|
||||
except ValueError:
|
||||
logger.error('Invalid socket port provided. Exiting.')
|
||||
raise SystemExit(1)
|
||||
|
||||
logger.info('Starting server collector.')
|
||||
httpd = HTTPServer(('', 9092), partial(Handler, download_plugin.download_stats, driver, logger, socket_url, socket_port))
|
||||
httpd.serve_forever()
|
55
monolith-entrypoint.sh
Normal file
55
monolith-entrypoint.sh
Normal file
@@ -0,0 +1,55 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -z "$TELEGRAF_HOSTNAME" ]; then
|
||||
echo "Error: TELEGRAF_HOSTNAME is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$TELEGRAF_MONGODB_DSN" ]; then
|
||||
echo "Error: TELEGRAF_MONGODB_DSN is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$TELEGRAF_MONGODB_DATABASE" ]; then
|
||||
echo "Error: TELEGRAF_MONGODB_DATABASE is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$SOCKET_PORT" ]; then
|
||||
echo "Error: SOCKET_PORT is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$VIDEO_URL" ]; then
|
||||
echo "Error: VIDEO_URL is not set"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Set the environment variables
|
||||
export DSN=$TELEGRAF_MONGODB_DSN
|
||||
export DATABASE=$TELEGRAF_MONGODB_DATABASE
|
||||
export HOSTNAME=$TELEGRAF_HOSTNAME
|
||||
|
||||
# Start the Selenium hub
|
||||
/opt/bin/entry_point.sh > /dev/null 2>&1 &
|
||||
|
||||
# Wait for Selenium hub to be ready
|
||||
printf 'Waiting for Selenium standalone to be ready'
|
||||
timeout=30
|
||||
while ! curl -sSL "http://localhost:4444/wd/hub/status" 2>/dev/null | jq -e '.value.ready' | grep -q true; do
|
||||
printf '.'
|
||||
sleep 1
|
||||
((timeout--))
|
||||
if [ $timeout -le 0 ]; then
|
||||
echo "Error: Selenium standalone did not become ready in time. Exiting..."
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
|
||||
printf '\n'
|
||||
|
||||
# Start the Telegraf agent
|
||||
telegraf --config ./telegraf.conf &
|
||||
|
||||
# Start the main Python script as PID 1
|
||||
exec ./venv/bin/python main.py
|
31
plugins/example_plugin.py
Normal file
31
plugins/example_plugin.py
Normal file
@@ -0,0 +1,31 @@
|
||||
import logging
|
||||
from selenium import webdriver
|
||||
from selenium.webdriver.remote.webdriver import WebDriver as Remote
|
||||
from utils.plugins_base import StatsSetupPlugin, StatsDownloadPlugin
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class ExampleStatsSetupPlugin(StatsSetupPlugin):
|
||||
def setup_stats(self, driver: webdriver.Chrome, url: str, retries: int = 5) -> webdriver.Chrome:
|
||||
logger.info("Running ExampleStatsSetupPlugin...")
|
||||
# Here you would implement the custom logic to setup stats
|
||||
# For example, you could click on a button to display stats.
|
||||
# You could also wait for an element to appear before continuing.
|
||||
# This is just an example
|
||||
|
||||
driver.get(url)
|
||||
|
||||
return driver
|
||||
|
||||
class ExampleStatsDownloadPlugin(StatsDownloadPlugin):
|
||||
def download_stats(self, driver: webdriver.Chrome, peersDict: dict, socket_url: str, socket_port: int):
|
||||
logger.info("Running ExampleStatsDownloadPlugin...")
|
||||
stats = {'message': 'Hello from ExampleStatsDownloadPlugin'}
|
||||
# Here you would implement the custom logic to download stats
|
||||
# and send them to the socket.
|
||||
# This is just an example
|
||||
|
||||
print(f"Sending stats: {stats} to {socket_url}:{socket_port}")
|
||||
|
||||
# Remember to call the saveStats method to send the stats to the socket
|
||||
super().saveStats([stats], socket_url, socket_port)
|
@@ -1,2 +1,3 @@
|
||||
selenium
|
||||
beautifulsoup4
|
||||
beautifulsoup4
|
||||
yaspin
|
29
selenium-standalone-stack/README.md
Normal file
29
selenium-standalone-stack/README.md
Normal file
@@ -0,0 +1,29 @@
|
||||
# Selenium standalone grid deployment script
|
||||
|
||||
## Cloud provider
|
||||
|
||||
This script use the services of Hetzner.
|
||||
|
||||
It should be easily modified to use other cloud providers.
|
||||
|
||||
## Dependencies
|
||||
|
||||
You need to install `jq`, `nmap` and `hcloud`, the Hetzner cloud API CLI.
|
||||
|
||||
On Debian
|
||||
```bash
|
||||
apt install jq nmap hcloud-cli
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
Just read the help provided by the script
|
||||
|
||||
```bash
|
||||
./create-selenium-stack.sh -h
|
||||
```
|
||||
|
||||
To remove all servers in the context:
|
||||
```bash
|
||||
./create-selenium-stack.sh -d -y
|
||||
```
|
288
selenium-standalone-stack/create-selenium-stack.sh
Normal file
288
selenium-standalone-stack/create-selenium-stack.sh
Normal file
@@ -0,0 +1,288 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -m # Enable Job Control
|
||||
|
||||
trap 'kill $(jobs -p)' SIGINT
|
||||
|
||||
# Reset
|
||||
NC='\033[0m' # Text Reset
|
||||
|
||||
# Regular Colors
|
||||
Red='\033[0;31m' # Red
|
||||
Green='\033[0;32m' # Green
|
||||
Cyan='\033[0;36m' # Cyan
|
||||
|
||||
if [[ -z $(which hcloud) ]]; then
|
||||
echo -e "${Red}hcloud could not be found in \$PATH!${NC}
|
||||
|
||||
Please put hcloud in \$PATH ($PATH),
|
||||
install it with your package manager
|
||||
or go to https://github.com/hetznercloud/cli/releases to download it."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z $(which jq) ]]; then
|
||||
echo -e "${Red}jq could not be found in \$PATH!${NC}
|
||||
|
||||
Please install jq to use this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -z $(which nmap) ]]; then
|
||||
echo -e "${Red}nmap could not be found in \$PATH!${NC}
|
||||
|
||||
Please install nmap to use this script."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
usage() {
|
||||
if hcloud context list | grep -q -v "ACTIVE"; then
|
||||
types=$(hcloud server-type list -o columns=name,cores,cpu_type,memory,storage_type,architecture | grep -v arm | sed -e 's/^/ /')
|
||||
keys=$(hcloud ssh-key list -o columns=name,fingerprint,age | sed -e 's/^/ /')
|
||||
contexts=" Available contexts:
|
||||
$(hcloud context list | sed -e 's/^/ /')"
|
||||
else
|
||||
types="No hcloud context, can’t get server types"
|
||||
keys="No hcloud context, can’t get SSH keys"
|
||||
contexts="No hcloud context available.
|
||||
You can create one with the following command:
|
||||
hcloud create context name_of_the_context
|
||||
Or let this script create one during execution."
|
||||
fi
|
||||
|
||||
cat << EOF
|
||||
$(basename "$0") (c) Framasoft 2023, WTFPL
|
||||
|
||||
USAGE
|
||||
$(basename "$0") [-h] [-d] [-s <int>] [-n <int>] [-t <vps type>] [-c <hcloud context>] -k <ssh-key>
|
||||
|
||||
OPTIONS
|
||||
-h Print this help and exit
|
||||
-d Delete all servers
|
||||
-dy Delete all servers without confirmation
|
||||
-s <int> How many VPS you want to start.
|
||||
Default: 1
|
||||
Maximum should be: limit (hcloud).
|
||||
Default: 1
|
||||
-n <int> How many nodes you want to start on each VPS.
|
||||
Default: 1
|
||||
-t <vps type> The type of VPS to start.
|
||||
Default: cpx21.
|
||||
See below
|
||||
-c <hcloud context> Name of the hcloud context
|
||||
Default: selenium-peertube.
|
||||
See below
|
||||
-k <ssh-key> The ssh key used to connect to the VPS.
|
||||
MANDATORY, no default.Starting node
|
||||
See below.
|
||||
-e <string> The path to the environment file to be copied and used on the VPS.
|
||||
Default: .env
|
||||
|
||||
$types
|
||||
|
||||
HCLOUD CONTEXT
|
||||
It’s the name of the project you want to create your VPS in.
|
||||
|
||||
$contexts
|
||||
|
||||
SSH KEYS
|
||||
You must have a ssh key registered on Hetzner to use this script.
|
||||
To create a key:
|
||||
hcloud ssh-key create --name my-key --public-key-from-file ~/.ssh/id_ed25519.pub
|
||||
|
||||
The ssh keys currently registered on Hetzner are:
|
||||
$keys
|
||||
EOF
|
||||
exit "$1"
|
||||
}
|
||||
|
||||
delete_server() {
|
||||
echo -e "${Cyan}$(hcloud server delete "$1")${NC}"
|
||||
}
|
||||
|
||||
create_nodes_server() {
|
||||
i="$1"
|
||||
TYPE="$2"
|
||||
KEY="$3"
|
||||
REGION="$4"
|
||||
SERVER_NAME="$REGION-node-$i"
|
||||
hcloud server create --start-after-create --name "$SERVER_NAME" --image debian-12 --type "$TYPE" --location "$REGION" --ssh-key "$KEY" > /dev/null
|
||||
echo -e "${Cyan}VPS n°$i created and started${NC}"
|
||||
}
|
||||
|
||||
start_nodes() {
|
||||
i="$1"
|
||||
REGION=$(hcloud server list -o json | jq -r '.[] | select(.name | contains("node-'$i'")) | .datacenter.location.name')
|
||||
SERVER_NAME="$REGION-node-$i"
|
||||
SERVER_IP=$(hcloud server ip "$SERVER_NAME")
|
||||
while [[ $(nmap -p 22 "$SERVER_IP" | grep -c open) -eq 0 ]]; do
|
||||
sleep 1
|
||||
done
|
||||
SSH_CONN="root@$SERVER_IP"
|
||||
scp -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" start-nodes.sh "${SSH_CONN}:" > /dev/null
|
||||
scp -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" "$ENV_FILE" "${SSH_CONN}:" > /dev/null
|
||||
ssh -o "LogLevel=ERROR" -o "UserKnownHostsFile /dev/null" -o "StrictHostKeyChecking no" -o "VerifyHostKeyDNS no" "$SSH_CONN" "/root/start-nodes.sh -n \"$NODES\"" > /dev/null
|
||||
echo -e "${Cyan}Nodes created on VPS n°${i}${NC}"
|
||||
}
|
||||
|
||||
CONTEXT=selenium-peertube
|
||||
SERVERS=1
|
||||
NODES=1
|
||||
TYPE=cpx21
|
||||
DELETE=0
|
||||
N_STRING=node
|
||||
FORCE_DELETION=0
|
||||
ENV_FILE=.env
|
||||
|
||||
while getopts "hds:n:t:k:c:y" option; do
|
||||
case $option in
|
||||
h)
|
||||
usage 0
|
||||
;;
|
||||
d)
|
||||
DELETE=1
|
||||
;;
|
||||
s)
|
||||
SERVERS=$OPTARG
|
||||
;;
|
||||
n)
|
||||
NODES=$OPTARG
|
||||
if [[ $NODES -gt 1 ]]; then
|
||||
N_STRING=nodes
|
||||
fi
|
||||
;;
|
||||
t)
|
||||
TYPE=$OPTARG
|
||||
;;
|
||||
k)
|
||||
KEY=$OPTARG
|
||||
;;
|
||||
c)
|
||||
CONTEXT=$OPTARG
|
||||
;;
|
||||
y)
|
||||
FORCE_DELETION=1
|
||||
;;
|
||||
*)
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ $(hcloud context active) != "$CONTEXT" ]]; then
|
||||
echo -e "${Cyan}Hcloud context is not '$CONTEXT'!${NC}"
|
||||
if hcloud context list | grep -q -F "$CONTEXT"; then
|
||||
echo -e "${Green}Selecting hcloud context ${CONTEXT}${NC}"
|
||||
hcloud context use "$CONTEXT"
|
||||
else
|
||||
echo -e "${Red}Hcloud context ${CONTEXT} does not exist.${NC}
|
||||
${Cyan}Will now try to create the context ${CONTEXT}${NC}"
|
||||
hcloud context create "$CONTEXT"
|
||||
fi
|
||||
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $DELETE -eq 1 ]]; then
|
||||
SERVERS=$(hcloud server list -o json)
|
||||
if [[ $SERVERS == 'null' ]]; then
|
||||
echo -e "${Cyan}No VPS to delete.${NC}"
|
||||
exit 0
|
||||
fi
|
||||
NAMES=$(echo "$SERVERS" | jq -r '.[] | .name' | sort -h)
|
||||
echo -e "${Red}You are about to delete the following VPS${NC}:"
|
||||
echo "$NAMES"
|
||||
if [[ $FORCE_DELETION -eq 1 ]]; then
|
||||
confirm="yes"
|
||||
else
|
||||
echo -e -n "${Cyan}Please confirm the deletion by typing '${NC}${Red}yes${NC}': "
|
||||
read -r confirm
|
||||
fi
|
||||
if [[ $confirm == 'yes' ]]; then
|
||||
for i in $NAMES; do
|
||||
echo -e "${Cyan}Starting server $i deletion${NC}"
|
||||
delete_server "$i" &
|
||||
done
|
||||
# Wait for all delete_server jobs to finish
|
||||
while true; do
|
||||
fg > /dev/null 2>&1
|
||||
[ $? == 1 ] && break
|
||||
done
|
||||
if [[ $(hcloud server list -o json) == '[]' ]]; then
|
||||
echo -e "${Green}All servers have been deleted${NC}"
|
||||
else
|
||||
echo -e "${Red}Some servers have not been deleted:${NC}"
|
||||
hcloud server list
|
||||
fi
|
||||
else
|
||||
echo "Deletion cancelled."
|
||||
fi
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [[ -z $KEY ]]; then
|
||||
echo -e "${Red}You must choose a ssh key!${NC}\n"
|
||||
usage 1
|
||||
fi
|
||||
|
||||
KEY_FOUND=0
|
||||
for i in $(hcloud ssh-key list -o json | jq -r '.[] | .name'); do
|
||||
if [[ $i == "$KEY" ]]; then
|
||||
KEY_FOUND=1
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $KEY_FOUND -eq 0 ]]; then
|
||||
echo -e "${Red}The chosen ssh key is not registered on Hetzner!${NC}\n"
|
||||
usage 1
|
||||
fi
|
||||
|
||||
if hcloud server list | grep -q -v NAME; then
|
||||
echo -e "${Red}There already are servers in the context! Exiting.${NC}\nList of the servers:"
|
||||
hcloud server list
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ ! -f "$ENV_FILE" ]]; then
|
||||
echo -e "${Red}Environment file '$ENV_FILE' does not exist!${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${Green}Creating $SERVERS VPS${NC}"
|
||||
REGIONS=($(hcloud location list -o json | jq -r '.[] | select(.name != "fsn1") | .name' | shuf))
|
||||
for i in $(seq 1 "$SERVERS"); do
|
||||
REGION=${REGIONS[$((i % ${#REGIONS[@]}))]}
|
||||
echo -e "${Cyan}Creating VPS n°$i in $REGION"
|
||||
create_nodes_server "$i" "$TYPE" "$KEY" "$REGION" &
|
||||
done
|
||||
|
||||
# Wait for all create_nodes_server jobs to finish
|
||||
while true; do
|
||||
fg > /dev/null 2>&1
|
||||
[ $? == 1 ] && break
|
||||
done
|
||||
|
||||
echo -e "${Green}Starting nodes on $SERVERS VPS ($NODES $N_STRING each)${NC}"
|
||||
for i in $(seq 1 "$SERVERS"); do
|
||||
echo -e "${Cyan}Starting $N_STRING on VPS n°$i${NC}"
|
||||
start_nodes "$i" &
|
||||
done
|
||||
|
||||
echo -e "${Green}Waiting for all nodes to be started${NC}"
|
||||
|
||||
# Wait for all start_nodes jobs to finish
|
||||
while true; do
|
||||
fg > /dev/null 2>&1
|
||||
[ $? == 1 ] && break
|
||||
done
|
||||
|
||||
echo -e "${Green}All the servers and nodes have been created and started!
|
||||
|
||||
Number of servers: $SERVERS
|
||||
Number of nodes per server: $NODES
|
||||
Type of the servers:
|
||||
nodes servers: $TYPE
|
||||
|
||||
You can remove all servers with the following command
|
||||
$0 -d${NC}"
|
126
selenium-standalone-stack/start-nodes.sh
Normal file
126
selenium-standalone-stack/start-nodes.sh
Normal file
@@ -0,0 +1,126 @@
|
||||
#!/bin/bash
|
||||
|
||||
usage() {
|
||||
cat << EOF
|
||||
$(basename "$0") (c) Framasoft 2023, WTPF
|
||||
|
||||
USAGE
|
||||
$(basename "$0") [-h] [-n <int>]
|
||||
|
||||
OPTIONS
|
||||
-h print this help and exit
|
||||
-n <int> how many selenium nodes you want to launch. Default: 1
|
||||
-e <string> the environment file path to use. Default: .env
|
||||
EOF
|
||||
exit "$1"
|
||||
}
|
||||
|
||||
NUMBER=1
|
||||
ENV_FILE=".env"
|
||||
|
||||
while getopts "hn:i:" option; do
|
||||
case $option in
|
||||
h)
|
||||
usage 0
|
||||
;;
|
||||
n)
|
||||
NUMBER=$OPTARG
|
||||
;;
|
||||
e)
|
||||
ENV_FILE=$OPTARG
|
||||
;;
|
||||
*)
|
||||
usage 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
HOST=$(hostname)
|
||||
|
||||
DEBIAN_FRONTEND=noninteractive
|
||||
export DEBIAN_FRONTEND
|
||||
|
||||
echo "Installing packages"
|
||||
apt-get -qq -y update
|
||||
apt-get -qq -y dist-upgrade
|
||||
apt-get -qq -y install jq \
|
||||
tmux \
|
||||
vim \
|
||||
multitail \
|
||||
htop \
|
||||
liquidprompt \
|
||||
coreutils \
|
||||
apparmor-utils \
|
||||
docker.io \
|
||||
|
||||
echo "Activating liquidprompt"
|
||||
liquidprompt_activate
|
||||
. /usr/share/liquidprompt/liquidprompt
|
||||
|
||||
echo "Modifying kernel parameters"
|
||||
sysctl net.ipv6.conf.default.forwarding=1
|
||||
sysctl net.ipv6.conf.all.forwarding=1
|
||||
|
||||
echo "Configuring Docker for IPv6"
|
||||
IP_ADDR=$(ip --json a show eth0 | jq '.[] | .addr_info | .[] | select(.family | contains("inet6")) | select(.scope | contains("global")) | .local' -r)
|
||||
NETWORK=$(echo "$IP_ADDR" | sed -e 's@:[^:]\+$@8000::/65@')
|
||||
|
||||
cat << EOF > /etc/docker/daemon.json
|
||||
{
|
||||
"ipv6": true,
|
||||
"fixed-cidr-v6": "$NETWORK"
|
||||
}
|
||||
EOF
|
||||
systemctl restart docker
|
||||
|
||||
echo "Starting $NUMBER Selenium nodes"
|
||||
|
||||
for NB in $(seq 1 "$NUMBER"); do
|
||||
NODE_NAME="selenium-${HOST}-instance-${NB}"
|
||||
|
||||
# Replace variables in the environment file
|
||||
TEMP_ENV_FILE=$(mktemp)
|
||||
while IFS= read -r line; do
|
||||
eval "echo \"$line\""
|
||||
done < "$ENV_FILE" > "$TEMP_ENV_FILE"
|
||||
ENV_FILE="$TEMP_ENV_FILE"
|
||||
|
||||
echo "Starting Selenium node n°$NB"
|
||||
docker run --rm \
|
||||
--env-file $ENV_FILE \
|
||||
--name "$NODE_NAME" \
|
||||
--pull always \
|
||||
--shm-size="2g" \
|
||||
-d \
|
||||
kobimex/peertube-collector-monolith:latest > /dev/null 2>&1
|
||||
|
||||
# Wait until the container gets an IPv6 address.
|
||||
DOCKER_IP=""
|
||||
for i in {1..10}; do
|
||||
DOCKER_IP=$(docker inspect -f '{{range.NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}' "$NODE_NAME")
|
||||
if [ -n "$DOCKER_IP" ]; then
|
||||
break
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
if [ -z "$DOCKER_IP" ]; then
|
||||
echo "Error: Could not retrieve a valid IPv6 address for $NODE_NAME." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Adding Selenium node n°$NB to neighbour proxy"
|
||||
ip -6 neighbour add proxy "$DOCKER_IP" dev eth0
|
||||
docker stop "$NODE_NAME"
|
||||
sleep 1
|
||||
|
||||
docker run --rm \
|
||||
--env-file $ENV_FILE \
|
||||
--name "$NODE_NAME" \
|
||||
--pull always \
|
||||
--shm-size="2g" \
|
||||
-d \
|
||||
-p 790$NB:790$NB \
|
||||
-e "SE_NO_VNC_PORT=790$NB" \
|
||||
kobimex/peertube-collector-monolith:latest > /dev/null 2>&1
|
||||
done
|
5
server/.env.example
Normal file
5
server/.env.example
Normal file
@@ -0,0 +1,5 @@
|
||||
# MongoDB Environment
|
||||
MONGO_INITDB_ROOT_USERNAME=root
|
||||
MONGO_INITDB_ROOT_PASSWORD=password
|
||||
MONGO_EXPRESS_USERNAME=admin
|
||||
MONGO_EXPRESS_PASSWORD=password
|
30
server/README.md
Normal file
30
server/README.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# Server
|
||||
|
||||
The repository contains a `server` directory with a simple MongoDB server (with initializations scripts) and WebUI that serves the WebRTC stats collected by the collector.
|
||||
|
||||
It's not mandatory to run and use this service, it's provided just as an example of how to store collected data.
|
||||
|
||||
## Setup
|
||||
|
||||
1. Change to the `server` directory:
|
||||
```sh
|
||||
cd server
|
||||
```
|
||||
|
||||
2. Create and configure the environment file based on the `.env.example` file:
|
||||
```sh
|
||||
cp .env.example .env
|
||||
```
|
||||
|
||||
3. Start the Docker containers:
|
||||
```sh
|
||||
docker compose up
|
||||
```
|
||||
|
||||
The WebUI control panel will be available at [http://localhost:8081](http://localhost:8081).
|
||||
|
||||
# Credits
|
||||
|
||||
- [WebRTC Internals Exporter](https://github.com/vpalmisano/webrtc-internals-exporter)
|
||||
- [WebRTC debugging with Prometheus/Grafana](https://medium.com/@vpalmisano/webrtc-debugging-with-prometheus-grafana-254b6ac71063)
|
||||
- [MongoDB Docker Compose examples](https://github.com/TGITS/docker-compose-examples/tree/main/mongodb-docker-compose-examples)
|
49
server/docker-compose.yml
Normal file
49
server/docker-compose.yml
Normal file
@@ -0,0 +1,49 @@
|
||||
services:
|
||||
mongodb:
|
||||
image: mongo:latest
|
||||
container_name: mongodb
|
||||
hostname: mongodb
|
||||
volumes:
|
||||
- ./mongodb/initdb.d/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
|
||||
- mongodb-data:/data/db/
|
||||
- mongodb-log:/var/log/mongodb/
|
||||
env_file:
|
||||
- .env
|
||||
environment:
|
||||
MONGO_INITDB_ROOT_USERNAME: ${MONGO_INITDB_ROOT_USERNAME}
|
||||
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_INITDB_ROOT_PASSWORD}
|
||||
ports:
|
||||
- "27017:27017"
|
||||
networks:
|
||||
- mongodb_network
|
||||
|
||||
mongo-express:
|
||||
image: mongo-express:latest
|
||||
container_name: mongo-express
|
||||
restart: always
|
||||
environment:
|
||||
ME_CONFIG_MONGODB_ADMINUSERNAME: ${MONGO_INITDB_ROOT_USERNAME}
|
||||
ME_CONFIG_MONGODB_ADMINPASSWORD: ${MONGO_INITDB_ROOT_PASSWORD}
|
||||
ME_CONFIG_MONGODB_PORT: 27017
|
||||
ME_CONFIG_MONGODB_SERVER: 'mongodb'
|
||||
ME_CONFIG_BASICAUTH_USERNAME: ${MONGO_EXPRESS_USERNAME}
|
||||
ME_CONFIG_BASICAUTH_PASSWORD: ${MONGO_EXPRESS_PASSWORD}
|
||||
ports:
|
||||
- 8081:8081
|
||||
networks:
|
||||
- mongodb_network
|
||||
depends_on:
|
||||
- mongodb
|
||||
|
||||
volumes:
|
||||
mongodb-data:
|
||||
driver: local
|
||||
name: mongo-data
|
||||
mongodb-log:
|
||||
driver: local
|
||||
name: mongo-log
|
||||
|
||||
networks:
|
||||
mongodb_network:
|
||||
driver: bridge
|
||||
name: mongo-network
|
33
server/mongodb/initdb.d/mongo-init.js
Normal file
33
server/mongodb/initdb.d/mongo-init.js
Normal file
@@ -0,0 +1,33 @@
|
||||
db = db.getSiblingDB("statistics");
|
||||
|
||||
db.createRole({
|
||||
role: "statsReadWrite",
|
||||
privileges: [
|
||||
{
|
||||
resource: {
|
||||
db: "statistics",
|
||||
collection: "peertube",
|
||||
},
|
||||
actions: ["insert"],
|
||||
},
|
||||
],
|
||||
roles: [
|
||||
{
|
||||
role: "read",
|
||||
db: "statistics",
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
db.createUser({
|
||||
user: "stats_user",
|
||||
pwd: "@z^VFhN7q%vzit",
|
||||
roles: [
|
||||
{
|
||||
role: 'statsReadWrite',
|
||||
db: 'statistics',
|
||||
},
|
||||
],
|
||||
});
|
||||
|
||||
db.createCollection("peertube");
|
BIN
server/peertube data/statistics.peertube_hetzner_default_latency.json
(Stored with Git LFS)
Normal file
BIN
server/peertube data/statistics.peertube_hetzner_default_latency.json
(Stored with Git LFS)
Normal file
Binary file not shown.
BIN
server/peertube data/statistics.peertube_hetzner_high_latency.json
(Stored with Git LFS)
Normal file
BIN
server/peertube data/statistics.peertube_hetzner_high_latency.json
(Stored with Git LFS)
Normal file
Binary file not shown.
@@ -7,7 +7,7 @@
|
||||
dedup_interval = "600s"
|
||||
|
||||
[[inputs.socket_listener]]
|
||||
service_address = "udp://:8094"
|
||||
service_address = "udp://:${SOCKET_PORT}"
|
||||
data_format = "xpath_json"
|
||||
[[inputs.socket_listener.xpath]]
|
||||
metric_name = "'peertube'"
|
||||
|
5
utils/Convenience.py
Normal file
5
utils/Convenience.py
Normal file
@@ -0,0 +1,5 @@
|
||||
def firstValid(*args, default):
|
||||
for arg in args:
|
||||
if arg is not None:
|
||||
return arg
|
||||
return default
|
@@ -3,10 +3,12 @@ import logging
|
||||
from http.server import BaseHTTPRequestHandler
|
||||
|
||||
class Handler(BaseHTTPRequestHandler):
|
||||
def __init__(self, custom_func, driver, logger, *args, **kwargs):
|
||||
def __init__(self, custom_func, driver, logger, socket_url, socket_port, *args, **kwargs):
|
||||
self._custom_func = custom_func
|
||||
self.logger = logger
|
||||
self.driver = driver
|
||||
self._socket_url = socket_url
|
||||
self._socket_port = socket_port
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
def do_POST(self):
|
||||
@@ -14,7 +16,7 @@ class Handler(BaseHTTPRequestHandler):
|
||||
content_length = int(self.headers['Content-Length'])
|
||||
post_data = self.rfile.read(content_length)
|
||||
self.logger.log(logging.DEBUG, f"POST request,\nPath: {self.path}\nHeaders:\n{self.headers}\n\nBody:\n{post_data.decode('utf-8')}")
|
||||
self._custom_func(self.driver, json.loads(post_data.decode('utf-8')))
|
||||
self._custom_func(self.driver, json.loads(post_data.decode('utf-8')), self._socket_url, self._socket_port)
|
||||
self.send_response(200)
|
||||
self.end_headers()
|
||||
self.wfile.write(b'POST request received')
|
||||
@@ -24,6 +26,11 @@ class Handler(BaseHTTPRequestHandler):
|
||||
self.wfile.write(b'404 Not Found')
|
||||
|
||||
def do_GET(self):
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
self.wfile.write(b'404 Not Found')
|
||||
if self.path == '/heartbeat':
|
||||
self.send_response(200)
|
||||
self.end_headers()
|
||||
self.wfile.write(b'Heartbeat OK')
|
||||
else:
|
||||
self.send_response(404)
|
||||
self.end_headers()
|
||||
self.wfile.write(b'404 Not Found')
|
29
utils/plugins_base.py
Normal file
29
utils/plugins_base.py
Normal file
@@ -0,0 +1,29 @@
|
||||
import abc
|
||||
import json
|
||||
import socket
|
||||
import logging
|
||||
from selenium import webdriver
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Abstract Base Classes for Plugins
|
||||
class StatsSetupPlugin(abc.ABC):
    """Contract for plugins that prepare a Selenium driver for stats collection."""

    @abc.abstractmethod
    def setup_stats(self, driver: webdriver.Remote | webdriver.Chrome, url: str, retries: int = 5) -> webdriver.Remote | webdriver.Chrome:
        """Navigate/configure ``driver`` for ``url``, retrying up to ``retries`` times."""
        ...
|
||||
|
||||
class StatsDownloadPlugin(abc.ABC):
    """Contract for plugins that extract stats from a driver and ship them over UDP."""

    @abc.abstractmethod
    def download_stats(self, driver: webdriver.Remote | webdriver.Chrome, peersDict: dict, socket_url: str, socket_port: int):
        """Collect stats from ``driver`` for the peers in ``peersDict`` and forward them."""
        pass

    @staticmethod
    def saveStats(stats: list, socket_url: str, socket_port: int):
        """Serialize ``stats`` as JSON and send it in a single UDP datagram.

        Errors are logged, never raised: delivery is best-effort
        (UDP gives no delivery guarantee anyway).
        """
        try:
            # Context manager guarantees the socket is closed even when
            # sendto() raises (the original close() ran only on success).
            with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
                logger.debug(f'Saving stats: {json.dumps(stats, indent=4)}')
                sock.sendto(json.dumps(stats).encode(), (socket_url, socket_port))
            logger.debug('Sent stats to socket.')
        except socket.error as e:
            logger.error(f'Got socket error: {e}')
|
1
webrtc-internals-exporter/background.bundle.js
Normal file
1
webrtc-internals-exporter/background.bundle.js
Normal file
File diff suppressed because one or more lines are too long
@@ -6,17 +6,17 @@ function log(...args) {
|
||||
|
||||
log("loaded");
|
||||
|
||||
import "/assets/pako.min.js";
|
||||
import "./assets/pako.min.js";
|
||||
|
||||
const DEFAULT_OPTIONS = {
|
||||
url: "http://localhost:9092",
|
||||
url: process.env.WEBRTC_INTERNALS_EXPORTER_URL + ":9092",
|
||||
username: "",
|
||||
password: "",
|
||||
updateInterval: 2,
|
||||
gzip: false,
|
||||
job: "webrtc-internals-exporter",
|
||||
enabledOrigins: { },
|
||||
enabledStats: ["data-channel", "local-candidate", "remote-candidate"]
|
||||
enabledOrigins: {},
|
||||
enabledStats: ["data-channel", "local-candidate", "remote-candidate", "candidate-pair"]
|
||||
};
|
||||
|
||||
const options = {};
|
||||
@@ -33,11 +33,6 @@ chrome.runtime.onInstalled.addListener(async ({ reason }) => {
|
||||
...options,
|
||||
});
|
||||
}
|
||||
|
||||
await chrome.alarms.create("webrtc-internals-exporter-alarm", {
|
||||
delayInMinutes: 1,
|
||||
periodInMinutes: 1,
|
||||
});
|
||||
});
|
||||
|
||||
async function updateTabInfo(tab) {
|
||||
@@ -104,76 +99,8 @@ chrome.tabs.onUpdated.addListener(async (tabId, changeInfo) => {
|
||||
await updateTabInfo({ id: tabId, url: changeInfo.url });
|
||||
});
|
||||
|
||||
// Periodic alarm hook: purge metrics for peer connections that have gone stale.
chrome.alarms.onAlarm.addListener((alarm) => {
  if (alarm.name !== "webrtc-internals-exporter-alarm") return;
  cleanupPeerConnections().catch((err) => {
    log(`cleanup peer connections error: ${err.message}`);
  });
});
|
||||
|
||||
/**
 * Record (or clear, when lastUpdate is falsy) the last-update timestamp for a
 * peer connection, then recompute the per-origin connection counts and
 * refresh the options state.
 * @param {{id: string, origin: string}} ref - connection identifier and page origin.
 * @param {number} [lastUpdate=0] - epoch ms of the latest report; 0 removes the entry.
 */
async function setPeerConnectionLastUpdate({ id, origin }, lastUpdate = 0) {
  const stored = await chrome.storage.local.get("peerConnectionsLastUpdate");
  const peerConnectionsLastUpdate = stored.peerConnectionsLastUpdate || {};

  if (lastUpdate) {
    peerConnectionsLastUpdate[id] = { origin, lastUpdate };
  } else {
    delete peerConnectionsLastUpdate[id];
  }
  await chrome.storage.local.set({ peerConnectionsLastUpdate });

  // Rebuild the per-origin tally from scratch on every update.
  const peerConnectionsPerOrigin = {};
  for (const { origin: o } of Object.values(peerConnectionsLastUpdate)) {
    peerConnectionsPerOrigin[o] = (peerConnectionsPerOrigin[o] || 0) + 1;
  }
  await chrome.storage.local.set({ peerConnectionsPerOrigin });
  await optionsUpdated();
}
|
||||
|
||||
/**
 * Delete exported metrics for peer connections that have not reported within
 * the staleness window: max(2 × updateInterval, 30) seconds.
 */
async function cleanupPeerConnections() {
  const stored = await chrome.storage.local.get("peerConnectionsLastUpdate");
  const lastUpdates = stored.peerConnectionsLastUpdate;
  const total = lastUpdates ? Object.keys(lastUpdates).length : 0;
  if (total === 0) {
    return;
  }

  log(`checking stale peer connections (${total} total)`);

  const now = Date.now();
  const staleAfterMs = Math.max(2 * options.updateInterval, 30) * 1000;

  // Collect one DELETE per stale connection, then wait for all of them
  // without letting a single failure abort the rest.
  const deletions = [];
  for (const [id, { origin, lastUpdate }] of Object.entries(lastUpdates)) {
    if (!id) continue;
    if (now - lastUpdate > staleAfterMs) {
      log(`removing stale peer connection metrics: ${id} ${origin}`);
      deletions.push(sendData("DELETE", { id, origin }));
    }
  }
  await Promise.allSettled(deletions);
}
|
||||
|
||||
// Send data to pushgateway.
|
||||
async function sendData(method, { id, origin }, data) {
|
||||
// Send data to POST handler.
|
||||
async function sendJsonData(method, data) {
|
||||
const { url, username, password, gzip, job } = options;
|
||||
const headers = {
|
||||
"Content-Type": "application/json",
|
||||
@@ -185,59 +112,7 @@ async function sendData(method, { id, origin }, data) {
|
||||
headers["Content-Encoding"] = "gzip";
|
||||
data = await pako.gzip(data);
|
||||
}
|
||||
log(`sendData: ${data} \n ${data.length} bytes (gzip: ${gzip}) url: ${url} job: ${job}`);
|
||||
const start = Date.now();
|
||||
const response = await fetch(
|
||||
`${url}/metrics/job/${job}/peerConnectionId/${id}`,
|
||||
{
|
||||
method,
|
||||
headers,
|
||||
body: method === "POST" ? data : undefined,
|
||||
},
|
||||
);
|
||||
|
||||
const stats = await chrome.storage.local.get([
|
||||
"messagesSent",
|
||||
"bytesSent",
|
||||
"totalTime",
|
||||
"errors",
|
||||
]);
|
||||
if (data) {
|
||||
stats.messagesSent = (stats.messagesSent || 0) + 1;
|
||||
stats.bytesSent = (stats.bytesSent || 0) + data.length;
|
||||
stats.totalTime = (stats.totalTime || 0) + Date.now() - start;
|
||||
}
|
||||
if (!response.ok) {
|
||||
stats.errors = (stats.errors || 0) + 1;
|
||||
}
|
||||
await chrome.storage.local.set(stats);
|
||||
|
||||
if (!response.ok) {
|
||||
const text = await response.text();
|
||||
throw new Error(`Response status: ${response.status} error: ${text}`);
|
||||
}
|
||||
|
||||
await setPeerConnectionLastUpdate(
|
||||
{ id, origin },
|
||||
method === "POST" ? start : undefined,
|
||||
);
|
||||
|
||||
return response.text();
|
||||
}
|
||||
|
||||
async function sendJsonData(method, { id, origin }, data) {
|
||||
const { url, username, password, gzip, job } = options;
|
||||
const headers = {
|
||||
"Content-Type": "application/json",
|
||||
};
|
||||
if (username && password) {
|
||||
headers.Authorization = "Basic " + btoa(`${username}:${password}`);
|
||||
}
|
||||
if (data && gzip) {
|
||||
headers["Content-Encoding"] = "gzip";
|
||||
data = await pako.gzip(data);
|
||||
}
|
||||
log(`sendData: ${data} \n ${data.length} bytes (gzip: ${gzip}) url: ${url} job: ${job}`);
|
||||
log(`sendJsonData: ${data} \n ${data.length} bytes (gzip: ${gzip}) url: ${url} job: ${job}`);
|
||||
const start = Date.now();
|
||||
const response = await fetch(
|
||||
`${url}/${job}`,
|
||||
@@ -269,90 +144,13 @@ async function sendJsonData(method, { id, origin }, data) {
|
||||
throw new Error(`Response status: ${response.status} error: ${text}`);
|
||||
}
|
||||
|
||||
await setPeerConnectionLastUpdate(
|
||||
{ id, origin },
|
||||
method === "POST" ? start : undefined,
|
||||
);
|
||||
|
||||
return response.text();
|
||||
}
|
||||
|
||||
// Numeric codes for RTCOutboundRtpStreamStats.qualityLimitationReason so the
// string enum can be exported as a gauge value. Frozen: it is a shared
// module-level constant and must never be mutated at runtime.
const QualityLimitationReasons = Object.freeze({
  none: 0,
  bandwidth: 1,
  cpu: 2,
  other: 3,
});
|
||||
|
||||
/**
 * Convert one peer connection's RTCStats values into Prometheus text
 * exposition format ("# TYPE <name> gauge" + sample lines) and push the
 * result to the collector via sendData().
 * @param {string} url - page URL; becomes the `pageUrl` label and origin key.
 * @param {string} id - exporter-assigned peer connection id.
 * @param {RTCPeerConnectionState} state - current connection state.
 * @param {any} values - RTCStats entries from RTCPeerConnection.getStats().
 */
async function sendPeerConnectionStats(url, id, state, values) {
  const origin = new URL(url).origin;

  // A closed connection has nothing to report: delete its series instead.
  if (state === "closed") {
    return sendData("DELETE", { id, origin });
  }

  let data = "";
  // Metric names already emitted, so each gets exactly one "# TYPE" header.
  const sentTypes = new Set();

  values.forEach((value) => {
    // Prometheus metric names must not contain dashes.
    const type = value.type.replace(/-/g, "_");
    const labels = [`pageUrl="${url}"`];
    const metrics = [];

    if (value.type === "peer-connection") {
      labels.push(`state="${state}"`);
    }

    // Partition each stats field: numbers become gauge samples, objects are
    // flattened one level into key_subkey samples, the quality-limitation
    // enum is mapped to its numeric code, and anything else becomes a label.
    Object.entries(value).forEach(([key, v]) => {
      if (typeof v === "number") {
        metrics.push([key, v]);
      } else if (typeof v === "object") {
        Object.entries(v).forEach(([subkey, subv]) => {
          if (typeof subv === "number") {
            metrics.push([`${key}_${subkey}`, subv]);
          }
        });
      } else if (
        key === "qualityLimitationReason" &&
        QualityLimitationReasons[v] !== undefined
      ) {
        metrics.push([key, QualityLimitationReasons[v]]);
      } else if (key === "googTimingFrameInfo") {
        // TODO: googTimingFrameInfo is intentionally dropped for now.
      } else {
        labels.push(`${key}="${v}"`);
      }
    });

    // Emit "name{labels} value" lines, prefixing each new metric name with
    // its one-time "# TYPE ... gauge" declaration.
    metrics.forEach(([key, v]) => {
      const name = `${type}_${key.replace(/-/g, "_")}`;
      let typeDesc = "";

      if (!sentTypes.has(name)) {
        typeDesc = `# TYPE ${name} gauge\n`;
        sentTypes.add(name);
      }
      data += `${typeDesc}${name}{${labels.join(",")}} ${v}\n`;
    });
  });

  // Skip the POST entirely when no numeric samples were produced.
  if (data.length > 0) {
    return sendData("POST", { id, origin }, data + "\n");
  }
}
|
||||
|
||||
chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
|
||||
if (message.event === "peer-connection-stats") {
|
||||
const { url, id, state, values } = message.data;
|
||||
|
||||
sendData("POST", { id, origin: new URL(url).origin }, JSON.stringify(message.data))
|
||||
sendJsonData("POST", JSON.stringify(message.data))
|
||||
.then(() => {
|
||||
sendResponse({});
|
||||
})
|
||||
@@ -360,9 +158,14 @@ chrome.runtime.onMessage.addListener((message, sender, sendResponse) => {
|
||||
sendResponse({ error: err.message });
|
||||
});
|
||||
} else if (message.event === "peer-connections-stats") {
|
||||
const { stats } = message.data;
|
||||
|
||||
sendJsonData("POST", { id: "all", origin: "all" }, JSON.stringify(message.data))
|
||||
sendJsonData("POST", JSON.stringify(message.data))
|
||||
.then(() => {
|
||||
sendResponse({});
|
||||
})
|
||||
.catch((err) => {
|
||||
sendResponse({ error: err.message });
|
||||
});
|
||||
} else {
|
||||
sendResponse({ error: "unknown event" });
|
||||
}
|
||||
|
@@ -79,19 +79,14 @@ if (window.location.protocol.startsWith("http")) {
|
||||
|
||||
// Handle stats messages.
|
||||
window.addEventListener("message", async (message) => {
|
||||
const { event, url, id, state, values, stats } = message.data;
|
||||
const { event, data, stats } = message.data;
|
||||
if (event === "webrtc-internal-exporter:ready") {
|
||||
sendOptions();
|
||||
} else if (event === "webrtc-internal-exporter:peer-connection-stats") {
|
||||
try {
|
||||
const response = await chrome.runtime.sendMessage({
|
||||
event: "peer-connection-stats",
|
||||
data: {
|
||||
url,
|
||||
id,
|
||||
state,
|
||||
values,
|
||||
},
|
||||
data: stats,
|
||||
});
|
||||
if (response.error) {
|
||||
log(`error: ${response.error}`);
|
||||
@@ -103,7 +98,7 @@ if (window.location.protocol.startsWith("http")) {
|
||||
try {
|
||||
const response = await chrome.runtime.sendMessage({
|
||||
event: "peer-connections-stats",
|
||||
data: stats,
|
||||
data: data,
|
||||
});
|
||||
if (response.error) {
|
||||
log(`error: ${response.error}`);
|
||||
|
@@ -40,7 +40,7 @@
|
||||
}
|
||||
],
|
||||
"background": {
|
||||
"service_worker": "background.js",
|
||||
"service_worker": "background.bundle.js",
|
||||
"type": "module"
|
||||
},
|
||||
"web_accessible_resources": [
|
||||
|
@@ -22,7 +22,7 @@ class WebrtcInternalExporter {
|
||||
});
|
||||
|
||||
window.postMessage({ event: "webrtc-internal-exporter:ready" });
|
||||
this.collectAllStats();
|
||||
setInterval(() => this.collectAndPostAllStats(), this.updateInterval);
|
||||
}
|
||||
|
||||
randomId() {
|
||||
@@ -31,63 +31,109 @@ class WebrtcInternalExporter {
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* @param {RTCPeerConnection} pc
|
||||
*/
|
||||
add(pc) {
|
||||
const id = this.randomId();
|
||||
pc.iceCandidates = [];
|
||||
pc.iceCandidateErrors = [];
|
||||
this.peerConnections.set(id, pc);
|
||||
pc.addEventListener("connectionstatechange", () => {
|
||||
log(`connectionStateChange: ${pc.connectionState}`);
|
||||
this.collectAndPostAllStats();
|
||||
|
||||
if (pc.connectionState === "closed") {
|
||||
this.peerConnections.delete(id);
|
||||
}
|
||||
});
|
||||
//this.collectAndPostStats(id);
|
||||
|
||||
/**
|
||||
* @param {RTCPeerConnectionIceErrorEvent} event
|
||||
*/
|
||||
pc.addEventListener("icecandidateerror", (event) => {
|
||||
this.peerConnections.get(id).iceCandidateErrors.push({
|
||||
timestamp: Date.now(),
|
||||
address: event.errorAddress,
|
||||
errorCode: event.errorCode,
|
||||
errorText: event.errorText,
|
||||
port: event.errorPort,
|
||||
url: event.url,
|
||||
});
|
||||
});
|
||||
|
||||
/**
|
||||
* @param {RTCPeerConnectionIceEvent} event
|
||||
*/
|
||||
pc.addEventListener("icecandidate", (event) => {
|
||||
this.peerConnections.get(id).iceCandidates.push({
|
||||
timestamp: Date.now(),
|
||||
candidate: event.candidate?.candidate,
|
||||
component: event.candidate?.component,
|
||||
foundation: event.candidate?.foundation,
|
||||
port: event.candidate?.port,
|
||||
priority: event.candidate?.priority,
|
||||
protocol: event.candidate?.protocol,
|
||||
relatedAddress: event.candidate?.relatedAddress,
|
||||
relatedPort: event.candidate?.relatedPort,
|
||||
sdpMLineIndex: event.candidate?.sdpMLineIndex,
|
||||
sdpMid: event.candidate?.sdpMid,
|
||||
tcpType: event.candidate?.tcpType,
|
||||
type: event.candidate?.type,
|
||||
usernameFragment: event.candidate?.usernameFragment,
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
async collectAndPostSingleStat(id) {
|
||||
const stats = await this.collectStats(id, this.collectAndPostSingleStat);
|
||||
const stats = await this.collectStats(id);
|
||||
if (Object.keys(stats).length === 0 || !stats) return;
|
||||
|
||||
window.postMessage(
|
||||
{
|
||||
event: "webrtc-internal-exporter:peer-connection-stats",
|
||||
...stats,
|
||||
stats: [stats]
|
||||
},
|
||||
[stats],
|
||||
[stats]
|
||||
);
|
||||
|
||||
log(`Single stat collected:`, [stats]);
|
||||
}
|
||||
|
||||
async collectAllStats() {
|
||||
async collectAndPostAllStats() {
|
||||
const stats = [];
|
||||
|
||||
for (const [id, pc] of this.peerConnections) {
|
||||
if (this.url && this.enabled && pc.connectionState === "connected") {
|
||||
const pcStats = await this.collectStats(id, pc);
|
||||
for (const [id] of this.peerConnections) {
|
||||
if (this.url && this.enabled) {
|
||||
const pcStats = await this.collectStats(id);
|
||||
if (Object.keys(pcStats).length === 0 || !pcStats) continue;
|
||||
stats.push(pcStats);
|
||||
}
|
||||
}
|
||||
|
||||
window.postMessage(
|
||||
{
|
||||
event: "webrtc-internal-exporter:peer-connections-stats",
|
||||
stats,
|
||||
},
|
||||
[stats],
|
||||
);
|
||||
window.postMessage(
|
||||
{
|
||||
event: "webrtc-internal-exporter:peer-connections-stats",
|
||||
data: stats
|
||||
},
|
||||
stats
|
||||
);
|
||||
|
||||
log(`Stats collected:`, stats);
|
||||
|
||||
setTimeout(this.collectAllStats.bind(this), this.updateInterval);
|
||||
return stats;
|
||||
log(`Stats collected:`, stats);
|
||||
|
||||
return stats;
|
||||
}
|
||||
|
||||
async collectStats(id, pc, binding) {
|
||||
/**
|
||||
* @param {string} id
|
||||
*/
|
||||
async collectStats(id) {
|
||||
var pc = this.peerConnections.get(id);
|
||||
if (!pc) return;
|
||||
|
||||
var completeStats = {};
|
||||
|
||||
if (!pc) {
|
||||
pc = this.peerConnections.get(id);
|
||||
if (!pc) return;
|
||||
}
|
||||
|
||||
if (this.url && this.enabled && pc.connectionState === "connected") {
|
||||
if (this.url && this.enabled) {
|
||||
try {
|
||||
const stats = await pc.getStats();
|
||||
const values = [...stats.values()].filter(
|
||||
@@ -98,7 +144,12 @@ class WebrtcInternalExporter {
|
||||
completeStats = {
|
||||
url: window.location.href,
|
||||
id,
|
||||
state: pc.connectionState,
|
||||
connectionState: pc.connectionState,
|
||||
iceConnectionState: pc.iceConnectionState,
|
||||
iceGatheringState: pc.iceGatheringState,
|
||||
signalingState: pc.signalingState,
|
||||
iceCandidateErrors: pc.iceCandidateErrors,
|
||||
iceCandidates: pc.iceCandidates,
|
||||
values,
|
||||
};
|
||||
} catch (error) {
|
||||
@@ -108,10 +159,6 @@ class WebrtcInternalExporter {
|
||||
|
||||
if (pc.connectionState === "closed") {
|
||||
this.peerConnections.delete(id);
|
||||
} else {
|
||||
if (binding) {
|
||||
setTimeout(binding.bind(this), this.updateInterval, id);
|
||||
}
|
||||
}
|
||||
|
||||
return completeStats;
|
||||
|
3
webrtc-internals-exporter/webpack/babel.config.js
Normal file
3
webrtc-internals-exporter/webpack/babel.config.js
Normal file
@@ -0,0 +1,3 @@
|
||||
/** Babel options for the webpack bundle: strip every comment from the output. */
const babelConfig = {
  shouldPrintComment: () => false,
};

module.exports = babelConfig;
|
26
webrtc-internals-exporter/webpack/build.js
Normal file
26
webrtc-internals-exporter/webpack/build.js
Normal file
@@ -0,0 +1,26 @@
|
||||
const { execSync } = require('child_process');

// Build driver for the extension bundle.
// Usage: npm run build -- [-u|--url <url>]
const args = process.argv.slice(2);

let url = '';

// Indexed loop (not forEach) so the value consumed by -u/--url can be
// skipped and is never re-scanned as an option itself.
for (let i = 0; i < args.length; i++) {
  const arg = args[i];
  if (arg === '-u' || arg === '--url') {
    url = args[i + 1];
    // Fail loudly instead of silently building without a URL when the
    // value is missing (original fell through to the default build).
    if (url === undefined || url.startsWith('-')) {
      console.error(`Missing value for ${arg}`);
      process.exit(1);
    }
    i++; // skip the consumed value
  } else if (arg === '-h' || arg === '--help') {
    console.log('Usage: npm run build -- [-u|--url <url>]');
    console.log('Options:');
    console.log('  -u, --url <url>  URL to use for the extension collector server');
    console.log('  -h, --help       Display this help message');
    process.exit(0);
  } else if (arg.startsWith('-')) {
    console.error(`Unrecognized argument: ${arg}`);
    process.exit(1);
  }
}

if (url) {
  console.log(`Building with URL: ${url}`);
  // JSON.stringify double-quotes the value so shell metacharacters in the
  // URL cannot break out of the webpack command line.
  execSync(`webpack --env URL=${JSON.stringify(url)}`, { stdio: 'inherit' });
} else {
  execSync('webpack', { stdio: 'inherit' });
}
|
9369
webrtc-internals-exporter/webpack/package-lock.json
generated
Normal file
9369
webrtc-internals-exporter/webpack/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
33
webrtc-internals-exporter/webpack/package.json
Normal file
33
webrtc-internals-exporter/webpack/package.json
Normal file
@@ -0,0 +1,33 @@
|
||||
{
|
||||
"name": "webrtc-internals-exporter",
|
||||
"version": "1.0.0",
|
||||
"private": true,
|
||||
"scripts": {
|
||||
"build": "node build.js"
|
||||
},
|
||||
"keywords": [],
|
||||
"author": "Mirko Milovanovic",
|
||||
"license": "MIT",
|
||||
"devDependencies": {
|
||||
"babel-loader": "^8.2.2",
|
||||
"clean-webpack-plugin": "^3.0.0",
|
||||
"copy-webpack-plugin": "^12.0.2",
|
||||
"dotenv": "^16.4.7",
|
||||
"file-loader": "^6.2.0",
|
||||
"html-webpack-plugin": "^5.3.1",
|
||||
"mini-css-extract-plugin": "^1.6.0",
|
||||
"postcss": "^8.2.14",
|
||||
"postcss-loader": "^5.2.0",
|
||||
"postcss-preset-env": "^10.1.4",
|
||||
"sass": "^1.32.12",
|
||||
"sass-loader": "^11.0.1",
|
||||
"serve": "^14.2.4",
|
||||
"style-loader": "^2.0.0",
|
||||
"terser-webpack-plugin": "^5.1.1",
|
||||
"ts-loader": "^9.1.2",
|
||||
"typescript": "^4.2.4",
|
||||
"webpack": "^5.38.1",
|
||||
"webpack-cli": "^4.7.2",
|
||||
"webpack-dev-server": "^5.2.0"
|
||||
}
|
||||
}
|
34
webrtc-internals-exporter/webpack/webpack.config.js
Normal file
34
webrtc-internals-exporter/webpack/webpack.config.js
Normal file
@@ -0,0 +1,34 @@
|
||||
const path = require('path');
|
||||
const { EnvironmentPlugin } = require('webpack');
|
||||
const envPath = path.resolve(__dirname, '../../.env');
|
||||
const envConfig = require('dotenv').config({ path: envPath }).parsed;
|
||||
|
||||
module.exports = (env) => {
|
||||
const url = env.URL || 'http://localhost';
|
||||
|
||||
return {
|
||||
entry: '../background.js',
|
||||
target: 'web',
|
||||
mode: 'production',
|
||||
module: {
|
||||
rules: [
|
||||
{
|
||||
test: /\.js?$/,
|
||||
use: 'babel-loader',
|
||||
exclude: /node_modules/,
|
||||
},
|
||||
],
|
||||
},
|
||||
resolve: { extensions: ['.tsx', '.ts', '.js'] },
|
||||
output: {
|
||||
filename: 'background.bundle.js',
|
||||
path: path.resolve(__dirname, '../'),
|
||||
publicPath: '',
|
||||
},
|
||||
plugins: [
|
||||
new EnvironmentPlugin({
|
||||
WEBRTC_INTERNALS_EXPORTER_URL: envConfig.WEBRTC_INTERNALS_EXPORTER_URL || url
|
||||
}),
|
||||
],
|
||||
};
|
||||
};
|
Reference in New Issue
Block a user