Compare commits
No commits in common. "62183646ae11dfe33f14668ff34e0a2fb882089e" and "26ee35b7fc2433bfb0cdb93bbd7f2a90bfcf648b" have entirely different histories.
62183646ae ... 26ee35b7fc
@@ -1,28 +0,0 @@
-name: Build and publish the docker image
-
-on:
-  push:
-    branches: ["custom"]
-
-env:
-  REGISTRY: git.ngn.tf
-  IMAGE: ${{gitea.repository}}
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout repository
-        uses: "https://github.com/actions/checkout@v4"
-
-      - name: Login to container repo
-        uses: "https://github.com/docker/login-action@v1"
-        with:
-          registry: ${{env.REGISTRY}}
-          username: ${{gitea.actor}}
-          password: ${{secrets.PACKAGES_TOKEN}}
-
-      - name: Build image
-        run: |
-          docker build . --tag ${{env.REGISTRY}}/${{env.IMAGE}}:latest
-          docker push ${{env.REGISTRY}}/${{env.IMAGE}}:latest
3 .github/FUNDING.yml vendored Normal file
@@ -0,0 +1,3 @@
+github: zedeus
+liberapay: zedeus
+patreon: nitter
60 .github/workflows/build-publish-docker.yml vendored Normal file
@@ -0,0 +1,60 @@
+name: Build and Publish Docker
+
+on:
+  push:
+    branches: ["master"]
+    paths-ignore: ["README.md"]
+  pull_request:
+    branches: ["master"]
+    paths-ignore: ["README.md"]
+
+env:
+  IMAGE_NAME: ${{ github.repository }}
+
+permissions:
+  contents: read
+  packages: write
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Set up QEMU
+        id: qemu
+        uses: docker/setup-qemu-action@v2
+        with:
+          platforms: arm64
+
+      - name: Setup Docker buildx
+        id: buildx
+        uses: docker/setup-buildx-action@v2
+
+      - name: Log in to GHCR
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Extract Docker metadata
+        id: meta
+        uses: docker/metadata-action@v4
+        with:
+          images: |
+            ghcr.io/${{ env.IMAGE_NAME }}
+
+
+      - name: Build and push all platforms Docker image
+        id: build-and-push
+        uses: docker/build-push-action@v4
+        with:
+          context: .
+          push: ${{ github.event_name != 'pull_request' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          platforms: linux/amd64,linux/arm64
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
56 .github/workflows/run-tests.yml vendored Normal file
@@ -0,0 +1,56 @@
+name: Tests
+
+on:
+  push:
+    paths-ignore:
+      - "*.md"
+    branches-ignore:
+      - master
+  workflow_call:
+
+jobs:
+  test:
+    runs-on: buildjet-2vcpu-ubuntu-2204
+    strategy:
+      matrix:
+        nim:
+          - "1.6.10"
+          - "1.6.x"
+          - "2.0.x"
+          - "devel"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          fetch-depth: 0
+      - name: Cache nimble
+        id: cache-nimble
+        uses: buildjet/cache@v3
+        with:
+          path: ~/.nimble
+          key: ${{ matrix.nim }}-nimble-${{ hashFiles('*.nimble') }}
+          restore-keys: |
+            ${{ matrix.nim }}-nimble-
+      - uses: actions/setup-python@v4
+        with:
+          python-version: "3.10"
+          cache: "pip"
+      - uses: jiro4989/setup-nim-action@v1
+        with:
+          nim-version: ${{ matrix.nim }}
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+      - run: nimble build -d:release -Y
+      - run: pip install seleniumbase
+      - run: seleniumbase install chromedriver
+      - uses: supercharge/redis-github-action@1.5.0
+      - name: Prepare Nitter
+        run: |
+          sudo apt install libsass-dev -y
+          cp nitter.example.conf nitter.conf
+          sed -i 's/enableDebug = false/enableDebug = true/g' nitter.conf
+          nimble md
+          nimble scss
+          echo '${{ secrets.GUEST_ACCOUNTS }}' > ./guest_accounts.jsonl
+      - name: Run tests
+        run: |
+          ./nitter &
+          pytest -n8 tests
20 .gitignore vendored
@@ -1,16 +1,14 @@
 nitter
 *.html
 *.db
-data
-tests/__pycache__
-tests/geckodriver.log
-tests/downloaded_files
-tests/latest_logs
-tools/gencss
-tools/rendermd
-public/css/style.css
-public/md/*.html
+/tests/__pycache__
+/tests/geckodriver.log
+/tests/downloaded_files
+/tests/latest_logs
+/tools/gencss
+/tools/rendermd
+/public/css/style.css
+/public/md/*.html
 nitter.conf
-compose.yml
-accounts.*
+guest_accounts.json*
 dump.rdb
25 Dockerfile
@@ -1,26 +1,25 @@
-FROM nimlang/nim:2.0.0-alpine-regular as build
+FROM nimlang/nim:2.0.0-alpine-regular as nim
+LABEL maintainer="setenforce@protonmail.com"
 
 RUN apk --no-cache add libsass-dev pcre
 
-WORKDIR /src
+WORKDIR /src/nitter
 
 COPY nitter.nimble .
 RUN nimble install -y --depsOnly
 
 COPY . .
-RUN nimble build -d:danger -d:lto -d:strip
-RUN nimble scss
-RUN nimble md
+RUN nimble build -d:danger -d:lto -d:strip \
+    && nimble scss \
+    && nimble md
 
 FROM alpine:latest
-
+WORKDIR /src/
 RUN apk --no-cache add pcre ca-certificates
-RUN useradd -d /src -u 1001 nitter
-
-WORKDIR /srv
-
-COPY --from=build /srv/nitter ./
-COPY --from=build /srv/public ./public
-
+COPY --from=nim /src/nitter/nitter ./
+COPY --from=nim /src/nitter/nitter.example.conf ./nitter.conf
+COPY --from=nim /src/nitter/public ./public
+EXPOSE 8080
+RUN adduser -h /src/ -D -s /bin/sh nitter
 USER nitter
 CMD ./nitter
25 Dockerfile.arm64 Normal file
@@ -0,0 +1,25 @@
+FROM alpine:3.18 as nim
+LABEL maintainer="setenforce@protonmail.com"
+
+RUN apk --no-cache add libsass-dev pcre gcc git libc-dev "nim=1.6.14-r0" "nimble=0.13.1-r2"
+
+WORKDIR /src/nitter
+
+COPY nitter.nimble .
+RUN nimble install -y --depsOnly
+
+COPY . .
+RUN nimble build -d:danger -d:lto -d:strip \
+    && nimble scss \
+    && nimble md
+
+FROM alpine:3.18
+WORKDIR /src/
+RUN apk --no-cache add pcre ca-certificates openssl1.1-compat
+COPY --from=nim /src/nitter/nitter ./
+COPY --from=nim /src/nitter/nitter.example.conf ./nitter.conf
+COPY --from=nim /src/nitter/public ./public
+EXPOSE 8080
+RUN adduser -h /src/ -D -s /bin/sh nitter
+USER nitter
+CMD ./nitter
189 README.md
@@ -1,5 +1,188 @@
-# [ngn.tf] | nitter
-
-![](https://git.ngn.tf/ngn/nitter/actions/workflows/build.yml/badge.svg)
-
-A fork of the [libmedium](https://github.com/PrivacyDevel/nitter) project, with my personal changes.
+# Nitter
+
+[![Test Matrix](https://github.com/zedeus/nitter/workflows/Tests/badge.svg)](https://github.com/zedeus/nitter/actions/workflows/run-tests.yml)
+[![Test Matrix](https://github.com/zedeus/nitter/workflows/Docker/badge.svg)](https://github.com/zedeus/nitter/actions/workflows/build-docker.yml)
+[![License](https://img.shields.io/github/license/zedeus/nitter?style=flat)](#license)
+
+A free and open source alternative Twitter front-end focused on privacy and
+performance. \
+Inspired by the [Invidious](https://github.com/iv-org/invidious)
+project.
+
+- No JavaScript or ads
+- All requests go through the backend, client never talks to Twitter
+- Prevents Twitter from tracking your IP or JavaScript fingerprint
+- Uses Twitter's unofficial API (no rate limits or developer account required)
+- Lightweight (for [@nim_lang](https://nitter.net/nim_lang), 60KB vs 784KB from twitter.com)
+- RSS feeds
+- Themes
+- Mobile support (responsive design)
+- AGPLv3 licensed, no proprietary instances permitted
+
+Liberapay: https://liberapay.com/zedeus \
+Patreon: https://patreon.com/nitter \
+BTC: bc1qp7q4qz0fgfvftm5hwz3vy284nue6jedt44kxya \
+ETH: 0x66d84bc3fd031b62857ad18c62f1ba072b011925 \
+LTC: ltc1qhsz5nxw6jw9rdtw9qssjeq2h8hqk2f85rdgpkr \
+XMR: 42hKayRoEAw4D6G6t8mQHPJHQcXqofjFuVfavqKeNMNUZfeJLJAcNU19i1bGdDvcdN6romiSscWGWJCczFLe9RFhM3d1zpL
+
+## Roadmap
+
+- Embeds
+- Account system with timeline support
+- Archiving tweets/profiles
+- Developer API
+
+## Resources
+
+The wiki contains
+[a list of instances](https://github.com/zedeus/nitter/wiki/Instances) and
+[browser extensions](https://github.com/zedeus/nitter/wiki/Extensions)
+maintained by the community.
+
+## Why?
+
+It's impossible to use Twitter without JavaScript enabled. For privacy-minded
+folks, preventing JavaScript analytics and IP-based tracking is important, but
+apart from using a VPN and uBlock/uMatrix, it's impossible. Despite being behind
+a VPN and using heavy-duty adblockers, you can get accurately tracked with your
+[browser's fingerprint](https://restoreprivacy.com/browser-fingerprinting/),
+[no JavaScript required](https://noscriptfingerprint.com/). This all became
+particularly important after Twitter [removed the
+ability](https://www.eff.org/deeplinks/2020/04/twitter-removes-privacy-option-and-shows-why-we-need-strong-privacy-laws)
+for users to control whether their data gets sent to advertisers.
+
+Using an instance of Nitter (hosted on a VPS for example), you can browse
+Twitter without JavaScript while retaining your privacy. In addition to
+respecting your privacy, Nitter is on average around 15 times lighter than
+Twitter, and in most cases serves pages faster (eg. timelines load 2-4x faster).
+
+In the future a simple account system will be added that lets you follow Twitter
+users, allowing you to have a clean chronological timeline without needing a
+Twitter account.
+
+## Screenshot
+
+![nitter](/screenshot.png)
+
+## Installation
+
+### Dependencies
+
+- libpcre
+- libsass
+- redis
+
+To compile Nitter you need a Nim installation, see
+[nim-lang.org](https://nim-lang.org/install.html) for details. It is possible to
+install it system-wide or in the user directory you create below.
+
+To compile the scss files, you need to install `libsass`. On Ubuntu and Debian,
+you can use `libsass-dev`.
+
+Redis is required for caching and in the future for account info. It should be
+available on most distros as `redis` or `redis-server` (Ubuntu/Debian).
+Running it with the default config is fine, Nitter's default config is set to
+use the default Redis port and localhost.
+
+Here's how to create a `nitter` user, clone the repo, and build the project
+along with the scss and md files.
+
+```bash
+# useradd -m nitter
+# su nitter
+$ git clone https://github.com/zedeus/nitter
+$ cd nitter
+$ nimble build -d:release
+$ nimble scss
+$ nimble md
+$ cp nitter.example.conf nitter.conf
+```
+
+Set your hostname, port, HMAC key, https (must be correct for cookies), and
+Redis info in `nitter.conf`. To run Redis, either run
+`redis-server --daemonize yes`, or `systemctl enable --now redis` (or
+redis-server depending on the distro). Run Nitter by executing `./nitter` or
+using the systemd service below. You should run Nitter behind a reverse proxy
+such as [Nginx](https://github.com/zedeus/nitter/wiki/Nginx) or
+[Apache](https://github.com/zedeus/nitter/wiki/Apache) for security and
+performance reasons.
+
+### Docker
+
+Page for the Docker image: https://hub.docker.com/r/zedeus/nitter
+
+#### NOTE: For ARM64 support, please use the separate ARM64 docker image: [`zedeus/nitter:latest-arm64`](https://hub.docker.com/r/zedeus/nitter/tags).
+
+To run Nitter with Docker, you'll need to install and run Redis separately
+before you can run the container. See below for how to also run Redis using
+Docker.
+
+To build and run Nitter in Docker:
+
+```bash
+docker build -t nitter:latest .
+docker run -v $(pwd)/nitter.conf:/src/nitter.conf -d --network host nitter:latest
+```
+
+Note: For ARM64, use this Dockerfile: [`Dockerfile.arm64`](https://github.com/zedeus/nitter/blob/master/Dockerfile.arm64).
+
+A prebuilt Docker image is provided as well:
+
+```bash
+docker run -v $(pwd)/nitter.conf:/src/nitter.conf -d --network host zedeus/nitter:latest
+```
+
+Using docker-compose to run both Nitter and Redis as different containers:
+Change `redisHost` from `localhost` to `nitter-redis` in `nitter.conf`, then run:
+
+```bash
+docker-compose up -d
+```
+
+Note the Docker commands expect a `nitter.conf` file in the directory you run
+them.
+
+### systemd
+
+To run Nitter via systemd you can use this service file:
+
+```ini
+[Unit]
+Description=Nitter (An alternative Twitter front-end)
+After=syslog.target
+After=network.target
+
+[Service]
+Type=simple
+
+# set user and group
+User=nitter
+Group=nitter
+
+# configure location
+WorkingDirectory=/home/nitter/nitter
+ExecStart=/home/nitter/nitter/nitter
+
+Restart=always
+RestartSec=15
+
+[Install]
+WantedBy=multi-user.target
+```
+
+Then enable and run the service:
+`systemctl enable --now nitter.service`
+
+### Logging
+
+Nitter currently prints some errors to stdout, and there is no real logging
+implemented. If you're running Nitter with systemd, you can check stdout like
+this: `journalctl -u nitter.service` (add `--follow` to see just the last 15
+lines). If you're running the Docker image, you can do this:
+`docker logs --follow *nitter container id*`
+
+## Contact
+
+Feel free to join our [Matrix channel](https://matrix.to/#/#nitter:matrix.org).
+You can email me at zedeus@pm.me if you wish to contact me personally.
@@ -1,32 +0,0 @@
-services:
-  nitter:
-    container_name: nitter
-    image: git.ngn.tf/ngn/nitter
-    ports:
-      - 80:8080
-    volumes:
-      - ./nitter.conf:/srv/nitter.conf:Z,ro
-      - ./accounts.jsonl:/srv/accounts.jsonl:Z,ro
-    depends_on:
-      - nitter-redis
-    restart: unless-stopped
-    user: 998:998
-    security_opt:
-      - no-new-privileges:true
-    cap_drop:
-      - ALL
-    read_only: true
-
-  nitter_redis:
-    container_name: nitter_redis
-    image: redis:6-alpine
-    command: redis-server --save 60 1 --loglevel warning
-    volumes:
-      - ./data:/data
-    restart: unless-stopped
-    user: 999:1000
-    security_opt:
-      - no-new-privileges:true
-    cap_drop:
-      - ALL
-    read_only: true
47 docker-compose.yml Normal file
@@ -0,0 +1,47 @@
+version: "3"
+
+services:
+
+  nitter:
+    image: ghcr.io/privacydevel/nitter:master
+    container_name: nitter
+    ports:
+      - "127.0.0.1:8080:8080" # Replace with "8080:8080" if you don't use a reverse proxy
+    volumes:
+      - ./nitter.conf:/src/nitter.conf:Z,ro
+    depends_on:
+      - nitter-redis
+    restart: unless-stopped
+    healthcheck:
+      test: wget -nv --tries=1 --spider http://127.0.0.1:8080/Jack/status/20 || exit 1
+      interval: 30s
+      timeout: 5s
+      retries: 2
+    user: "998:998"
+    read_only: true
+    security_opt:
+      - no-new-privileges:true
+    cap_drop:
+      - ALL
+
+  nitter-redis:
+    image: redis:6-alpine
+    container_name: nitter-redis
+    command: redis-server --save 60 1 --loglevel warning
+    volumes:
+      - nitter-redis:/data
+    restart: unless-stopped
+    healthcheck:
+      test: redis-cli ping
+      interval: 30s
+      timeout: 5s
+      retries: 2
+    user: "999:1000"
+    read_only: true
+    security_opt:
+      - no-new-privileges:true
+    cap_drop:
+      - ALL
+
+volumes:
+  nitter-redis:
@@ -1,4 +1,4 @@
-[server]
+[Server]
 hostname = "nitter.net" # for generating links, change this to your own domain/ip
 title = "nitter"
 address = "0.0.0.0"
@@ -6,9 +6,8 @@ port = 8080
 https = false # disable to enable cookies when not using https
 httpMaxConnections = 100
 staticDir = "./public"
-accountsFile = "./accounts.jsonl"
 
-[cache]
+[Cache]
 listMinutes = 240 # how long to cache list info (not the tweets, so keep it high)
 rssMinutes = 10 # how long to cache rss queries
 redisHost = "localhost" # Change to "nitter-redis" if using docker-compose
@@ -20,7 +19,7 @@ redisMaxConnections = 30
 # goes above this, they're closed when released. don't worry about this unless
 # you receive tons of requests per second
 
-[config]
+[Config]
 hmacKey = "secretkey" # random key for cryptographic signing of video urls
 base64Media = false # use base64 encoding for proxied media urls
 enableRSS = true # set this to false to disable RSS feeds
@@ -34,8 +33,11 @@ tokenCount = 10
 # always at least `tokenCount` usable tokens. only increase this if you receive
 # major bursts all the time and don't have a rate limiting setup via e.g. nginx
 
+#cookieHeader = "ct0=XXXXXXXXXXXXXXXXX; auth_token=XXXXXXXXXXXXXX" # authentication cookie of a logged in account, required for the likes tab and NSFW content
+#xCsrfToken = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX" # required for the likes tab and NSFW content
+
 # Change default preferences here, see src/prefs_impl.nim for a complete list
-[preferences]
+[Preferences]
 theme = "Nitter"
 replaceTwitter = "nitter.net"
 replaceYouTube = "piped.video"
BIN screenshot.png Normal file
Binary file not shown. (Size: 957 KiB)
@@ -1,52 +0,0 @@
-#!/bin/bash -e
-
-# Grab oauth token for use with Nitter (requires Twitter account).
-# results: {"oauth_token":"xxxxxxxxxx-xxxxxxxxx","oauth_token_secret":"xxxxxxxxxxxxxxxxxxxxx"}
-
-if [ $# -ne 2 ]; then
-  echo "please specify a username and password"
-  exit 1
-fi
-
-username="${1}"
-password="${2}"
-
-bearer_token='AAAAAAAAAAAAAAAAAAAAAFXzAwAAAAAAMHCxpeSDG1gLNLghVe8d74hl6k4%3DRUMF4xAQLsbeBhTSRrCiQpJtxoGWeyHrDb5te2jpGskWDFW82F'
-guest_token=$(curl -s -XPOST https://api.twitter.com/1.1/guest/activate.json -H "Authorization: Bearer ${bearer_token}" | jq -r '.guest_token')
-base_url='https://api.twitter.com/1.1/onboarding/task.json'
-header=(-H "Authorization: Bearer ${bearer_token}" -H "User-Agent: TwitterAndroid/10.21.1" -H "Content-Type: application/json" -H "X-Guest-Token: ${guest_token}")
-
-# start flow
-flow_1=$(curl -si -XPOST "${base_url}?flow_name=login" "${header[@]}")
-
-# get 'att', now needed in headers, and 'flow_token' from flow_1
-att=$(sed -En 's/^att: (.*)\r/\1/p' <<< "${flow_1}")
-flow_token=$(sed -n '$p' <<< "${flow_1}" | jq -r .flow_token)
-
-if [[ -z "$flow_1" || -z "$flow_token" ]]; then
-  echo "Couldn't retrieve flow token (twitter not reachable?)"
-  exit 1
-fi
-
-# username
-token_2=$(curl -s -XPOST "${base_url}" -H "att: ${att}" "${header[@]}" \
-  -d '{"flow_token":"'"${flow_token}"'","subtask_inputs":[{"subtask_id":"LoginEnterUserIdentifierSSO","settings_list":{"setting_responses":[{"key":"user_identifier","response_data":{"text_data":{"result":"'"${username}"'"}}}],"link":"next_link"}}]}' | jq -r .flow_token)
-
-if [[ -z "$token_2" || "$token_2" == "null" ]]; then
-  echo "Couldn't retrieve user token (check if login is correct)"
-  exit 1
-fi
-
-# password
-token_3=$(curl -s -XPOST "${base_url}" -H "att: ${att}" "${header[@]}" \
-  -d '{"flow_token":"'"${token_2}"'","subtask_inputs":[{"enter_password":{"password":"'"${password}"'","link":"next_link"},"subtask_id":"LoginEnterPassword"}]}' | jq -r .flow_token)
-
-if [[ -z "$token_3" || "$token_3" == "null" ]]; then
-  echo "Couldn't retrieve user token (check if password is correct)"
-  exit 1
-fi
-
-# finally print oauth_token and secret
-curl -s -XPOST "${base_url}" -H "att: ${att}" "${header[@]}" \
-  -d '{"flow_token":"'"${token_3}"'","subtask_inputs":[{"check_logged_in_account":{"link":"AccountDuplicationCheck_false"},"subtask_id":"AccountDuplicationCheck"}]}' | \
-  jq -c '.subtasks[0]|if(.open_account) then [{oauth_token: .open_account.oauth_token, oauth_token_secret: .open_account.oauth_token_secret}] else empty end'
18 src/api.nim
@@ -71,20 +71,10 @@ proc getGraphListMembers*(list: List; after=""): Future[Result[User]] {.async.}
 
 proc getFavorites*(id: string; cfg: Config; after=""): Future[Profile] {.async.} =
   if id.len == 0: return
-  var
-    variables = %*{
-      "userId": id,
-      "includePromotedContent":false,
-      "withClientEventToken":false,
-      "withBirdwatchNotes":false,
-      "withVoice":true,
-      "withV2Timeline":false
-    }
-  if after.len > 0:
-    variables["cursor"] = % after
-  let
-    url = consts.favorites ? {"variables": $variables, "features": gqlFeatures}
-  result = parseGraphTimeline(await fetch(url, Api.favorites), after)
+  let
+    ps = genParams({"userId": id}, after)
+    url = consts.favorites / (id & ".json") ? ps
+  result = parseTimeline(await fetch(url, Api.favorites), after)
 
 proc getGraphTweetResult*(id: string): Future[Tweet] {.async.} =
   if id.len == 0: return
@@ -49,7 +49,7 @@ proc getOauthHeader(url, oauthToken, oauthTokenSecret: string): string =
 
 proc genHeaders*(url, oauthToken, oauthTokenSecret: string): HttpHeaders =
   let header = getOauthHeader(url, oauthToken, oauthTokenSecret)
 
   result = newHttpHeaders({
     "connection": "keep-alive",
     "authorization": header,
@@ -149,6 +149,11 @@ template retry(bod) =
 
 proc fetch*(url: Uri; api: Api; additional_headers: HttpHeaders = newHttpHeaders()): Future[JsonNode] {.async.} =
 
+  if len(cfg.cookieHeader) != 0:
+    additional_headers.add("Cookie", cfg.cookieHeader)
+  if len(cfg.xCsrfToken) != 0:
+    additional_headers.add("x-csrf-token", cfg.xCsrfToken)
+
   retry:
     var body: string
     fetchImpl(body, additional_headers):
22 src/auth.nim
@@ -185,22 +185,22 @@ proc setRateLimit*(account: GuestAccount; api: Api; remaining, reset: int) =
 
   account.apis[api] = RateLimit(remaining: remaining, reset: reset)
 
-proc initAccountPool*(cfg: Config) =
-  let path = cfg.accountsFile
+proc initAccountPool*(cfg: Config; path: string) =
   enableLogging = cfg.enableDebug
 
-  if !path.endswith(".jsonl"):
-    log "Accounts file should be formated with JSONL"
-    quit 1
+  let jsonlPath = if path.endsWith(".json"): (path & 'l') else: path
 
-  if !fileExists(path):
-    log "Failed to access the accounts file (", path, ")"
+  if fileExists(jsonlPath):
+    log "Parsing JSONL guest accounts file: ", jsonlPath
+    for line in jsonlPath.lines:
+      accountPool.add parseGuestAccount(line)
+  elif fileExists(path):
+    log "Parsing JSON guest accounts file: ", path
+    accountPool = parseGuestAccounts(path)
+  else:
+    echo "[accounts] ERROR: ", path, " not found. This file is required to authenticate API requests."
     quit 1
 
-  log "Parsing JSONL accounts file: ", path
-  for line in path.lines:
-    accountPool.add parseGuestAccount(line)
-
   let accountsPrePurge = accountPool.len
   #accountPool.keepItIf(not it.hasExpired)
 
@@ -16,33 +16,34 @@ proc getConfig*(path: string): (Config, parseCfg.Config) =
 
   let conf = Config(
     # Server
-    address: cfg.get("server", "address", "0.0.0.0"),
-    port: cfg.get("server", "port", 8080),
-    useHttps: cfg.get("server", "https", true),
-    httpMaxConns: cfg.get("server", "httpMaxConnections", 100),
-    staticDir: cfg.get("server", "staticDir", "./public"),
-    accountsFile: cfg.get("server", "accountsFile", "./accounts.jsonl"),
-    title: cfg.get("server", "title", "Nitter"),
-    hostname: cfg.get("server", "hostname", "nitter.net"),
+    address: cfg.get("Server", "address", "0.0.0.0"),
+    port: cfg.get("Server", "port", 8080),
+    useHttps: cfg.get("Server", "https", true),
+    httpMaxConns: cfg.get("Server", "httpMaxConnections", 100),
+    staticDir: cfg.get("Server", "staticDir", "./public"),
+    title: cfg.get("Server", "title", "Nitter"),
+    hostname: cfg.get("Server", "hostname", "nitter.net"),
 
     # Cache
-    listCacheTime: cfg.get("cache", "listMinutes", 120),
-    rssCacheTime: cfg.get("cache", "rssMinutes", 10),
+    listCacheTime: cfg.get("Cache", "listMinutes", 120),
+    rssCacheTime: cfg.get("Cache", "rssMinutes", 10),
 
-    redisHost: cfg.get("cache", "redisHost", "localhost"),
-    redisPort: cfg.get("cache", "redisPort", 6379),
-    redisConns: cfg.get("cache", "redisConnections", 20),
-    redisMaxConns: cfg.get("cache", "redisMaxConnections", 30),
-    redisPassword: cfg.get("cache", "redisPassword", ""),
+    redisHost: cfg.get("Cache", "redisHost", "localhost"),
+    redisPort: cfg.get("Cache", "redisPort", 6379),
+    redisConns: cfg.get("Cache", "redisConnections", 20),
+    redisMaxConns: cfg.get("Cache", "redisMaxConnections", 30),
+    redisPassword: cfg.get("Cache", "redisPassword", ""),
 
     # Config
-    hmacKey: cfg.get("config", "hmacKey", "secretkey"),
-    base64Media: cfg.get("config", "base64Media", false),
-    minTokens: cfg.get("config", "tokenCount", 10),
-    enableRss: cfg.get("config", "enableRSS", true),
-    enableDebug: cfg.get("config", "enableDebug", false),
-    proxy: cfg.get("config", "proxy", ""),
-    proxyAuth: cfg.get("config", "proxyAuth", "")
+    hmacKey: cfg.get("Config", "hmacKey", "secretkey"),
+    base64Media: cfg.get("Config", "base64Media", false),
+    minTokens: cfg.get("Config", "tokenCount", 10),
+    enableRss: cfg.get("Config", "enableRSS", true),
+    enableDebug: cfg.get("Config", "enableDebug", false),
+    proxy: cfg.get("Config", "proxy", ""),
+    proxyAuth: cfg.get("Config", "proxyAuth", ""),
+    cookieHeader: cfg.get("Config", "cookieHeader", ""),
+    xCsrfToken: cfg.get("Config", "xCsrfToken", "")
   )
 
   return (conf, cfg)
@@ -16,7 +16,10 @@ import routes/[
 const instancesUrl = "https://github.com/zedeus/nitter/wiki/Instances"
 const issuesUrl = "https://github.com/zedeus/nitter/issues"
 
-initAccountPool(cfg)
+let
+  accountsPath = getEnv("NITTER_ACCOUNTS_FILE", "./guest_accounts.json")
+
+initAccountPool(cfg, accountsPath)
 
 if not cfg.enableDebug:
   # Silence Jester's query warning
@@ -33,7 +33,6 @@ proc parseGraphUser(js: JsonNode): User =
   var user = js{"user_result", "result"}
   if user.isNull:
     user = ? js{"user_results", "result"}
-
   result = parseUser(user{"legacy"})
 
   if result.verifiedType == VerifiedType.none and user{"is_blue_verified"}.getBool(false):
@@ -535,8 +534,7 @@ proc parseGraphTimeline*(js: JsonNode; root: string; after=""): Profile =
 
   let instructions =
     if root == "list": ? js{"data", "list", "timeline_response", "timeline", "instructions"}
-    elif root == "user": ? js{"data", "user_result", "result", "timeline_response", "timeline", "instructions"}
-    else: ? js{"data", "user", "result", "timeline", "timeline", "instructions"}
+    else: ? js{"data", "user_result", "result", "timeline_response", "timeline", "instructions"}
 
   if instructions.len == 0:
     return
@@ -556,21 +554,6 @@
           result.tweets.content.add thread.content
         elif entryId.startsWith("cursor-bottom"):
           result.tweets.bottom = e{"content", "value"}.getStr
-      # TODO cleanup
-      if i{"type"}.getStr == "TimelineAddEntries":
-        for e in i{"entries"}:
-          let entryId = e{"entryId"}.getStr
-          if entryId.startsWith("tweet"):
-            with tweetResult, e{"content", "itemContent", "tweet_results", "result"}:
-              let tweet = parseGraphTweet(tweetResult, false)
-              if not tweet.available:
-                tweet.id = parseBiggestInt(entryId.getId())
-              result.tweets.content.add tweet
-          elif "-conversation-" in entryId or entryId.startsWith("homeConversation"):
-            let (thread, self) = parseGraphThread(e)
-            result.tweets.content.add thread.content
-          elif entryId.startsWith("cursor-bottom"):
-            result.tweets.bottom = e{"content", "value"}.getStr
     if after.len == 0 and i{"__typename"}.getStr == "TimelinePinEntry":
       with tweetResult, i{"entry", "content", "content", "tweetResult", "result"}:
         let tweet = parseGraphTweet(tweetResult, false)
@@ -265,7 +265,6 @@
     title*: string
     hostname*: string
    staticDir*: string
-    accountsFile*: string
 
     hmacKey*: string
     base64Media*: bool
@@ -283,7 +282,9 @@
     redisConns*: int
     redisMaxConns*: int
     redisPassword*: string
-    redisDb*: int
+    cookieHeader*: string
+    xCsrfToken*: string
+
 
   Rss* = object
     feed*, cursor*: string
@@ -38,8 +38,9 @@ proc renderProfileTabs*(query: Query; username: string; cfg: Config): VNode =
       a(href=(link & "/with_replies")): text "Tweets & Replies"
     li(class=query.getTabClass(media)):
       a(href=(link & "/media")): text "Media"
-    li(class=query.getTabClass(favorites)):
-      a(href=(link & "/favorites")): text "Likes"
+    if len(cfg.xCsrfToken) != 0 and len(cfg.cookieHeader) != 0:
+      li(class=query.getTabClass(favorites)):
+        a(href=(link & "/favorites")): text "Likes"
     li(class=query.getTabClass(tweets)):
       a(href=(link & "/search")): text "Search"
 