Merge remote-tracking branch 'origin/main' into masenf/multiprocess-compile-try-again

Masen Furer 2024-03-14 10:17:23 -07:00
commit c5f1a2fca9
16 changed files with 1342 additions and 259 deletions

View File

@@ -21,11 +21,10 @@ env:
PYTHONIOENCODING: 'utf8'
TELEMETRY_ENABLED: false
NODE_OPTIONS: '--max_old_space_size=4096'
DATABASE_URL: ${{ secrets.DATABASE_URL }}
jobs: 
reflex-web:
env:
DATABASE_URL: ${{ secrets.DATABASE_URL }}
strategy:
fail-fast: false
matrix:
@@ -70,8 +69,58 @@ jobs:
- name: Run Benchmarks
# Only run if the database creds are available in this context.
if: ${{ env.DATABASE_URL }}
working-directory: ./integration/benchmarks
run: poetry run python benchmarks.py "$GITHUB_SHA" .lighthouseci
run: poetry run python scripts/lighthouse_score_upload.py "$GITHUB_SHA" ./integration/benchmarks/.lighthouseci
env:
GITHUB_SHA: ${{ github.sha }}
PR_TITLE: ${{ github.event.pull_request.title }}
simple-apps-benchmarks:
env:
OUTPUT_FILE: benchmarks.json
timeout-minutes: 50
strategy:
# Prioritize getting more information out of the workflow (even if something fails)
fail-fast: false
matrix:
# Show OS combos first in GUI
os: [ubuntu-latest, windows-latest, macos-latest]
python-version: ['3.8.18', '3.9.18', '3.10.13', '3.11.5', '3.12.0']
exclude:
- os: windows-latest
python-version: '3.10.13'
- os: windows-latest
python-version: '3.9.18'
- os: windows-latest
python-version: '3.8.18'
include:
- os: windows-latest
python-version: '3.10.11'
- os: windows-latest
python-version: '3.9.13'
- os: windows-latest
python-version: '3.8.10'
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@v4
- uses: ./.github/actions/setup_build_env
with:
python-version: ${{ matrix.python-version }}
run-poetry-install: true
create-venv-at-path: .venv
- name: Install additional dependencies for DB access
run: poetry run pip install psycopg2-binary
- name: Run benchmark tests
env:
APP_HARNESS_HEADLESS: 1
PYTHONUNBUFFERED: 1
run: |
poetry run pytest -v benchmarks/ --benchmark-json=${{ env.OUTPUT_FILE }} -s
- name: Upload benchmark results
# Only run if the database creds are available in this context.
if: ${{ env.DATABASE_URL }}
run: poetry run python scripts/simple_app_benchmark_upload.py --os "${{ matrix.os }}"
--python-version "${{ matrix.python-version }}" --commit-sha "${{ github.sha }}"
--benchmark-json "${{ env.OUTPUT_FILE }}" --pr-title "${{ github.event.pull_request.title }}"
--db-url "${{ env.DATABASE_URL }}" --branch-name "${{ github.head_ref || github.ref_name }}"
--event-type "${{ github.event_name }}" --actor "${{ github.actor }}"
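
For orientation, a hedged sketch (not part of the workflow) of inspecting the benchmarks.json report that the "Run benchmark tests" step writes and the upload step consumes; the key names follow pytest-benchmark's JSON output:

import json

# Hypothetical local inspection of the file written by
# `pytest benchmarks/ --benchmark-json=benchmarks.json`.
with open("benchmarks.json") as f:
    report = json.load(f)

for bench in report.get("benchmarks", []):
    stats = bench.get("stats", {})
    print(bench.get("name"), stats.get("mean"), stats.get("stddev"))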

View File

@@ -10,6 +10,7 @@ repos:
hooks:
- id: ruff
args: [--fix, --exit-non-zero-on-fix]
exclude: '^integration/benchmarks/'
- repo: https://github.com/RobertCraigie/pyright-python
rev: v1.1.313

benchmarks/__init__.py Normal file
View File

@@ -0,0 +1,3 @@
"""Reflex benchmarks."""
WINDOWS_SKIP_REASON = "Takes too much time as a result of npm"

benchmarks/conftest.py Normal file
View File

@@ -0,0 +1,20 @@
"""Shared conftest for all benchmark tests."""
import pytest
from reflex.testing import AppHarness, AppHarnessProd
@pytest.fixture(
scope="session", params=[AppHarness, AppHarnessProd], ids=["dev", "prod"]
)
def app_harness_env(request):
"""Parametrize the AppHarness class to use for the test, either dev or prod.
Args:
request: The pytest fixture request object.
Returns:
The AppHarness class to use for the test.
"""
return request.param
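
A hedged example of how a benchmark test might consume this fixture (the test name is illustrative; AppHarnessProd subclasses AppHarness in reflex.testing):

from reflex.testing import AppHarness


def test_harness_env_is_a_harness_class(app_harness_env):
    # The fixture yields the class itself, not an instance; the test decides
    # whether to build a dev (AppHarness) or prod (AppHarnessProd) app from it.
    assert issubclass(app_harness_env, AppHarness)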

View File

@@ -0,0 +1,370 @@
"""Benchmark tests for apps with varying component numbers."""
from __future__ import annotations
import functools
import time
from typing import Generator
import pytest
from benchmarks import WINDOWS_SKIP_REASON
from reflex import constants
from reflex.compiler import utils
from reflex.testing import AppHarness, chdir
from reflex.utils import build
def render_component(num: int):
"""Generate a number of components based on num.
Args:
num: number of components to produce.
Returns:
The rendered number of components.
"""
import reflex as rx
return [
rx.fragment(
rx.box(
rx.accordion.root(
rx.accordion.item(
header="Full Ingredients", # type: ignore
content="Yes. It's built with accessibility in mind.", # type: ignore
font_size="3em",
),
rx.accordion.item(
header="Applications", # type: ignore
content="Yes. It's unstyled by default, giving you freedom over the look and feel.", # type: ignore
),
collapsible=True,
variant="ghost",
width="25rem",
),
padding_top="20px",
),
rx.box(
rx.drawer.root(
rx.drawer.trigger(
rx.button("Open Drawer with snap points"), as_child=True
),
rx.drawer.overlay(),
rx.drawer.portal(
rx.drawer.content(
rx.flex(
rx.drawer.title("Drawer Content"),
rx.drawer.description("Drawer description"),
rx.drawer.close(
rx.button("Close Button"),
as_child=True,
),
direction="column",
margin="5em",
align_items="center",
),
top="auto",
height="100%",
flex_direction="column",
background_color="var(--green-3)",
),
),
snap_points=["148px", "355px", 1],
),
),
rx.box(
rx.callout(
"You will need admin privileges to install and access this application.",
icon="info",
size="3",
),
),
rx.box(
rx.table.root(
rx.table.header(
rx.table.row(
rx.table.column_header_cell("Full name"),
rx.table.column_header_cell("Email"),
rx.table.column_header_cell("Group"),
),
),
rx.table.body(
rx.table.row(
rx.table.row_header_cell("Danilo Sousa"),
rx.table.cell("danilo@example.com"),
rx.table.cell("Developer"),
),
rx.table.row(
rx.table.row_header_cell("Zahra Ambessa"),
rx.table.cell("zahra@example.com"),
rx.table.cell("Admin"),
),
rx.table.row(
rx.table.row_header_cell("Jasper Eriksson"),
rx.table.cell("jasper@example.com"),
rx.table.cell("Developer"),
),
),
)
),
)
] * num
def AppWithTenComponentsOnePage():
"""A reflex app with roughly 10 components on one page."""
import reflex as rx
def index() -> rx.Component:
return rx.center(rx.vstack(*render_component(1)))
app = rx.App(state=rx.State)
app.add_page(index)
def AppWithHundredComponentOnePage():
"""A reflex app with roughly 100 components on one page."""
import reflex as rx
def index() -> rx.Component:
return rx.center(rx.vstack(*render_component(100)))
app = rx.App(state=rx.State)
app.add_page(index)
def AppWithThousandComponentsOnePage():
"""A reflex app with roughly 1000 components on one page."""
import reflex as rx
def index() -> rx.Component:
return rx.center(rx.vstack(*render_component(1000)))
app = rx.App(state=rx.State)
app.add_page(index)
@pytest.fixture(scope="session")
def app_with_10_components(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Start Blank Template app at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
running AppHarness instance
"""
root = tmp_path_factory.mktemp("app10components")
yield AppHarness.create(
root=root,
app_source=functools.partial(
AppWithTenComponentsOnePage, render_component=render_component # type: ignore
),
) # type: ignore
@pytest.fixture(scope="session")
def app_with_100_components(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Start Blank Template app at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
running AppHarness instance
"""
root = tmp_path_factory.mktemp("app100components")
yield AppHarness.create(
root=root,
app_source=functools.partial(
AppWithHundredComponentOnePage, render_component=render_component # type: ignore
),
) # type: ignore
@pytest.fixture(scope="session")
def app_with_1000_components(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 1000 components at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
an AppHarness instance
"""
root = tmp_path_factory.mktemp("app1000components")
yield AppHarness.create(
root=root,
app_source=functools.partial(
AppWithThousandComponentsOnePage, render_component=render_component # type: ignore
),
) # type: ignore
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10_compile_time_cold(benchmark, app_with_10_components):
"""Test the compile time on a cold start for an app with roughly 10 components.
Args:
benchmark: The benchmark fixture.
app_with_10_components: The app harness.
"""
def setup():
with chdir(app_with_10_components.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_10_components._initialize_app()
build.setup_frontend(app_with_10_components.app_path)
def benchmark_fn():
with chdir(app_with_10_components.app_path):
app_with_10_components.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=10)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10_compile_time_warm(benchmark, app_with_10_components):
"""Test the compile time on a warm start for an app with roughly 10 components.
Args:
benchmark: The benchmark fixture.
app_with_10_components: The app harness.
"""
with chdir(app_with_10_components.app_path):
app_with_10_components._initialize_app()
build.setup_frontend(app_with_10_components.app_path)
def benchmark_fn():
with chdir(app_with_10_components.app_path):
app_with_10_components.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_100_compile_time_cold(benchmark, app_with_100_components):
"""Test the compile time on a cold start for an app with roughly 100 components.
Args:
benchmark: The benchmark fixture.
app_with_100_components: The app harness.
"""
def setup():
with chdir(app_with_100_components.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_100_components._initialize_app()
build.setup_frontend(app_with_100_components.app_path)
def benchmark_fn():
with chdir(app_with_100_components.app_path):
app_with_100_components.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_100_compile_time_warm(benchmark, app_with_100_components):
"""Test the compile time on a warm start for an app with roughly 100 components.
Args:
benchmark: The benchmark fixture.
app_with_100_components: The app harness.
"""
with chdir(app_with_100_components.app_path):
app_with_100_components._initialize_app()
build.setup_frontend(app_with_100_components.app_path)
def benchmark_fn():
with chdir(app_with_100_components.app_path):
app_with_100_components.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1000_compile_time_cold(benchmark, app_with_1000_components):
"""Test the compile time on a cold start for an app with roughly 1000 components.
Args:
benchmark: The benchmark fixture.
app_with_1000_components: The app harness.
"""
def setup():
with chdir(app_with_1000_components.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_1000_components._initialize_app()
build.setup_frontend(app_with_1000_components.app_path)
def benchmark_fn():
with chdir(app_with_1000_components.app_path):
app_with_1000_components.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying component numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1000_compile_time_warm(benchmark, app_with_1000_components):
"""Test the compile time on a warm start for an app with roughly 1000 components.
Args:
benchmark: The benchmark fixture.
app_with_1000_components: The app harness.
"""
with chdir(app_with_1000_components.app_path):
app_with_1000_components._initialize_app()
build.setup_frontend(app_with_1000_components.app_path)
def benchmark_fn():
with chdir(app_with_1000_components.app_path):
app_with_1000_components.app_instance.compile_()
benchmark(benchmark_fn)
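
The cold-start tests above use benchmark.pedantic so that setup() can wipe the compiled pages before every round, while the warm tests let a plain benchmark() call reuse the already-initialized frontend. A standalone sketch of that pedantic pattern, with sleeps standing in for compile work (illustrative only, not part of the suite):

import time


def test_cold_start_pattern(benchmark):
    state = {"cache_primed": False}

    def setup():
        # Reset whatever would make later rounds cheaper (here, a fake cache flag).
        state["cache_primed"] = False

    def run():
        time.sleep(0.02 if not state["cache_primed"] else 0.01)
        state["cache_primed"] = True

    # setup runs before every round, so each measurement starts cold.
    benchmark.pedantic(run, setup=setup, rounds=5)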

View File

@@ -0,0 +1,557 @@
"""Benchmark tests for apps with varying page numbers."""
from __future__ import annotations
import functools
import time
from typing import Generator
import pytest
from benchmarks import WINDOWS_SKIP_REASON
from reflex import constants
from reflex.compiler import utils
from reflex.testing import AppHarness, chdir
from reflex.utils import build
def render_multiple_pages(app, num: int):
"""Add multiple pages based on num.
Args:
app: The App object.
num: number of pages to render.
"""
from typing import Tuple
from rxconfig import config # type: ignore
import reflex as rx
docs_url = "https://reflex.dev/docs/getting-started/introduction/"
filename = f"{config.app_name}/{config.app_name}.py"
college = [
"Stanford University",
"Arizona",
"Arizona state",
"Baylor",
"Boston College",
"Boston University",
]
class State(rx.State):
"""The app state."""
position: str
college: str
age: Tuple[int, int] = (18, 50)
salary: Tuple[int, int] = (0, 25000000)
comp1 = rx.center(
rx.theme_panel(),
rx.vstack(
rx.heading("Welcome to Reflex!", size="9"),
rx.text("Get started by editing ", rx.code(filename)),
rx.button(
"Check out our docs!",
on_click=lambda: rx.redirect(docs_url),
size="4",
),
align="center",
spacing="7",
font_size="2em",
),
height="100vh",
)
comp2 = rx.vstack(
rx.hstack(
rx.vstack(
rx.select(
["C", "PF", "SF", "PG", "SG"],
placeholder="Select a position. (All)",
on_change=State.set_position, # type: ignore
size="3",
),
rx.select(
college,
placeholder="Select a college. (All)",
on_change=State.set_college, # type: ignore
size="3",
),
),
rx.vstack(
rx.vstack(
rx.hstack(
rx.badge("Min Age: ", State.age[0]),
rx.divider(orientation="vertical"),
rx.badge("Max Age: ", State.age[1]),
),
rx.slider(
default_value=[18, 50],
min=18,
max=50,
on_value_commit=State.set_age, # type: ignore
),
align_items="left",
width="100%",
),
rx.vstack(
rx.hstack(
rx.badge("Min Sal: ", State.salary[0] // 1000000, "M"),
rx.divider(orientation="vertical"),
rx.badge("Max Sal: ", State.salary[1] // 1000000, "M"),
),
rx.slider(
default_value=[0, 25000000],
min=0,
max=25000000,
on_value_commit=State.set_salary, # type: ignore
),
align_items="left",
width="100%",
),
),
spacing="4",
),
width="100%",
)
for i in range(1, num + 1):
if i % 2 == 1:
app.add_page(comp1, route=f"page{i}")
else:
app.add_page(comp2, route=f"page{i}")
def AppWithOnePage():
"""A reflex app with one page."""
from rxconfig import config # type: ignore
import reflex as rx
docs_url = "https://reflex.dev/docs/getting-started/introduction/"
filename = f"{config.app_name}/{config.app_name}.py"
class State(rx.State):
"""The app state."""
pass
def index() -> rx.Component:
return rx.center(
rx.chakra.input(
id="token", value=State.router.session.client_token, is_read_only=True
),
rx.vstack(
rx.heading("Welcome to Reflex!", size="9"),
rx.text("Get started by editing ", rx.code(filename)),
rx.button(
"Check out our docs!",
on_click=lambda: rx.redirect(docs_url),
size="4",
),
align="center",
spacing="7",
font_size="2em",
),
height="100vh",
)
app = rx.App(state=rx.State)
app.add_page(index)
def AppWithTenPages():
"""A reflex app with 10 pages."""
import reflex as rx
app = rx.App(state=rx.State)
render_multiple_pages(app, 10)
def AppWithHundredPages():
"""A reflex app with 100 pages."""
import reflex as rx
app = rx.App(state=rx.State)
render_multiple_pages(app, 100)
def AppWithThousandPages():
"""A reflex app with Thousand pages."""
import reflex as rx
app = rx.App(state=rx.State)
render_multiple_pages(app, 1000)
def AppWithTenThousandPages():
"""A reflex app with ten thousand pages."""
import reflex as rx
app = rx.App(state=rx.State)
render_multiple_pages(app, 10000)
@pytest.fixture(scope="session")
def app_with_one_page(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 10000 pages at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
an AppHarness instance
"""
root = tmp_path_factory.mktemp(f"app1")
yield AppHarness.create(root=root, app_source=AppWithOnePage) # type: ignore
@pytest.fixture(scope="session")
def app_with_ten_pages(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 10 pages at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
an AppHarness instance
"""
root = tmp_path_factory.mktemp(f"app10")
yield AppHarness.create(root=root, app_source=functools.partial(AppWithTenPages, render_comp=render_multiple_pages)) # type: ignore
@pytest.fixture(scope="session")
def app_with_hundred_pages(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 100 pages at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
an AppHarness instance
"""
root = tmp_path_factory.mktemp(f"app100")
yield AppHarness.create(
root=root,
app_source=functools.partial(
AppWithHundredPages, render_comp=render_multiple_pages # type: ignore
),
) # type: ignore
@pytest.fixture(scope="session")
def app_with_thousand_pages(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 1000 pages at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
an AppHarness instance
"""
root = tmp_path_factory.mktemp(f"app1000")
yield AppHarness.create(
root=root,
app_source=functools.partial( # type: ignore
AppWithThousandPages, render_comp=render_multiple_pages # type: ignore
),
) # type: ignore
@pytest.fixture(scope="session")
def app_with_ten_thousand_pages(
tmp_path_factory,
) -> Generator[AppHarness, None, None]:
"""Create an app with 10000 pages at tmp_path via AppHarness.
Args:
tmp_path_factory: pytest tmp_path_factory fixture
Yields:
running AppHarness instance
"""
root = tmp_path_factory.mktemp(f"app10000")
yield AppHarness.create(
root=root,
app_source=functools.partial(
AppWithTenThousandPages, render_comp=render_multiple_pages # type: ignore
),
) # type: ignore
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1_compile_time_cold(benchmark, app_with_one_page):
"""Test the compile time on a cold start for an app with 1 page.
Args:
benchmark: The benchmark fixture.
app_with_one_page: The app harness.
"""
def setup():
with chdir(app_with_one_page.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_one_page._initialize_app()
build.setup_frontend(app_with_one_page.app_path)
def benchmark_fn():
with chdir(app_with_one_page.app_path):
app_with_one_page.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1_compile_time_warm(benchmark, app_with_one_page):
"""Test the compile time on a warm start for an app with 1 page.
Args:
benchmark: The benchmark fixture.
app_with_one_page: The app harness.
"""
with chdir(app_with_one_page.app_path):
app_with_one_page._initialize_app()
build.setup_frontend(app_with_one_page.app_path)
def benchmark_fn():
with chdir(app_with_one_page.app_path):
app_with_one_page.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10_compile_time_cold(benchmark, app_with_ten_pages):
"""Test the compile time on a cold start for an app with 10 page.
Args:
benchmark: The benchmark fixture.
app_with_ten_pages: The app harness.
"""
def setup():
with chdir(app_with_ten_pages.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_ten_pages._initialize_app()
build.setup_frontend(app_with_ten_pages.app_path)
def benchmark_fn():
with chdir(app_with_ten_pages.app_path):
app_with_ten_pages.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10_compile_time_warm(benchmark, app_with_ten_pages):
"""Test the compile time on a warm start for an app with 10 page.
Args:
benchmark: The benchmark fixture.
app_with_ten_pages: The app harness.
"""
with chdir(app_with_ten_pages.app_path):
app_with_ten_pages._initialize_app()
build.setup_frontend(app_with_ten_pages.app_path)
def benchmark_fn():
with chdir(app_with_ten_pages.app_path):
app_with_ten_pages.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_100_compile_time_cold(benchmark, app_with_hundred_pages):
"""Test the compile time on a cold start for an app with 100 page.
Args:
benchmark: The benchmark fixture.
app_with_hundred_pages: The app harness.
"""
def setup():
with chdir(app_with_hundred_pages.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_hundred_pages._initialize_app()
build.setup_frontend(app_with_hundred_pages.app_path)
def benchmark_fn():
with chdir(app_with_hundred_pages.app_path):
app_with_hundred_pages.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_100_compile_time_warm(benchmark, app_with_hundred_pages):
"""Test the compile time on a warm start for an app with 100 page.
Args:
benchmark: The benchmark fixture.
app_with_hundred_pages: The app harness.
"""
with chdir(app_with_hundred_pages.app_path):
app_with_hundred_pages._initialize_app()
build.setup_frontend(app_with_hundred_pages.app_path)
def benchmark_fn():
with chdir(app_with_hundred_pages.app_path):
app_with_hundred_pages.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skipif(constants.IS_WINDOWS, reason=WINDOWS_SKIP_REASON)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1000_compile_time_cold(benchmark, app_with_thousand_pages):
"""Test the compile time on a cold start for an app with 1000 page.
Args:
benchmark: The benchmark fixture.
app_with_thousand_pages: The app harness.
"""
def setup():
with chdir(app_with_thousand_pages.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_thousand_pages._initialize_app()
build.setup_frontend(app_with_thousand_pages.app_path)
def benchmark_fn():
with chdir(app_with_thousand_pages.app_path):
app_with_thousand_pages.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_1000_compile_time_warm(benchmark, app_with_thousand_pages):
"""Test the compile time on a warm start for an app with 1000 page.
Args:
benchmark: The benchmark fixture.
app_with_thousand_pages: The app harness.
"""
with chdir(app_with_thousand_pages.app_path):
app_with_thousand_pages._initialize_app()
build.setup_frontend(app_with_thousand_pages.app_path)
def benchmark_fn():
with chdir(app_with_thousand_pages.app_path):
app_with_thousand_pages.app_instance.compile_()
benchmark(benchmark_fn)
@pytest.mark.skip
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10000_compile_time_cold(benchmark, app_with_ten_thousand_pages):
"""Test the compile time on a cold start for an app with 10000 page.
Args:
benchmark: The benchmark fixture.
app_with_ten_thousand_pages: The app harness.
"""
def setup():
with chdir(app_with_ten_thousand_pages.app_path):
utils.empty_dir(constants.Dirs.WEB_PAGES, keep_files=["_app.js"])
app_with_ten_thousand_pages._initialize_app()
build.setup_frontend(app_with_ten_thousand_pages.app_path)
def benchmark_fn():
with chdir(app_with_ten_thousand_pages.app_path):
app_with_ten_thousand_pages.app_instance.compile_()
benchmark.pedantic(benchmark_fn, setup=setup, rounds=5)
@pytest.mark.skip
@pytest.mark.benchmark(
group="Compile time of varying page numbers",
min_rounds=5,
timer=time.perf_counter,
disable_gc=True,
warmup=False,
)
def test_app_10000_compile_time_warm(benchmark, app_with_ten_thousand_pages):
"""Test the compile time on a warm start for an app with 10000 page.
Args:
benchmark: The benchmark fixture.
app_with_ten_thousand_pages: The app harness.
"""
def benchmark_fn():
with chdir(app_with_ten_thousand_pages.app_path):
app_with_ten_thousand_pages.app_instance.compile_()
benchmark(benchmark_fn)

View File

@@ -1,49 +0,0 @@
"""Helper functions for the benchmarking integration."""
import json
from datetime import datetime
import psycopg2
def insert_benchmarking_data(
db_connection_url: str,
lighthouse_data: dict,
performance_data: list[dict],
commit_sha: str,
pr_title: str,
):
"""Insert the benchmarking data into the database.
Args:
db_connection_url: The URL to connect to the database.
lighthouse_data: The Lighthouse data to insert.
performance_data: The performance data to insert.
commit_sha: The commit SHA to insert.
pr_title: The PR title to insert.
"""
# Serialize the JSON data
lighthouse_json = json.dumps(lighthouse_data)
performance_json = json.dumps(performance_data)
# Get the current timestamp
current_timestamp = datetime.now()
# Connect to the database and insert the data
with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
insert_query = """
INSERT INTO benchmarks (lighthouse, performance, commit_sha, pr_title, time)
VALUES (%s, %s, %s, %s, %s);
"""
cursor.execute(
insert_query,
(
lighthouse_json,
performance_json,
commit_sha,
pr_title,
current_timestamp,
),
)
# Commit the transaction
conn.commit()

View File

@@ -1,121 +0,0 @@
"""Benchmark the time it takes to compile a reflex app."""
import importlib
import reflex
rx = reflex
class State(rx.State):
"""A simple state class with a count variable."""
count: int = 0
def increment(self):
"""Increment the count."""
self.count += 1
def decrement(self):
"""Decrement the count."""
self.count -= 1
class SliderVariation(State):
"""A simple state class with a count variable."""
value: int = 50
def set_end(self, value: int):
"""Increment the count.
Args:
value: The value of the slider.
"""
self.value = value
def sample_small_page() -> rx.Component:
"""A simple page with a button that increments the count.
Returns:
A reflex component.
"""
return rx.vstack(
*[rx.button(State.count, font_size="2em") for i in range(100)],
gap="1em",
)
def sample_large_page() -> rx.Component:
"""A large page with a slider that increments the count.
Returns:
A reflex component.
"""
return rx.vstack(
*[
rx.vstack(
rx.heading(SliderVariation.value),
rx.slider(on_change_end=SliderVariation.set_end),
width="100%",
)
for i in range(100)
],
gap="1em",
)
def add_small_pages(app: rx.App):
"""Add 10 small pages to the app.
Args:
app: The reflex app to add the pages to.
"""
for i in range(10):
app.add_page(sample_small_page, route=f"/{i}")
def add_large_pages(app: rx.App):
"""Add 10 large pages to the app.
Args:
app: The reflex app to add the pages to.
"""
for i in range(10):
app.add_page(sample_large_page, route=f"/{i}")
def test_mean_import_time(benchmark):
"""Test that the mean import time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""
def import_reflex():
importlib.reload(reflex)
# Benchmark the import
benchmark(import_reflex)
def test_mean_add_small_page_time(benchmark):
"""Test that the mean add page time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""
app = rx.App(state=State)
benchmark(add_small_pages, app)
def test_mean_add_large_page_time(benchmark):
"""Test that the mean add page time is less than 1 second.
Args:
benchmark: The benchmark fixture.
"""
app = rx.App(state=State)
results = benchmark(add_large_pages, app)
print(results)

View File

@@ -1,4 +1,5 @@
"""Shared conftest for all integration tests."""
import os
import re
from pathlib import Path
@@ -20,7 +21,7 @@ def xvfb():
Yields:
the pyvirtualdisplay object that the browser will be open on
"""
if os.environ.get("GITHUB_ACTIONS"):
if os.environ.get("GITHUB_ACTIONS") and not os.environ.get("APP_HARNESS_HEADLESS"):
from pyvirtualdisplay.smartdisplay import ( # pyright: ignore [reportMissingImports]
SmartDisplay,
)
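
With this change, runs that already set APP_HARNESS_HEADLESS=1 (as the simple-apps-benchmarks job above does) skip the virtual display entirely. A minimal sketch of the condition:

import os


def should_use_virtual_display() -> bool:
    # Only spin up a virtual display on CI when the harness is not already headless.
    return bool(os.environ.get("GITHUB_ACTIONS")) and not os.environ.get("APP_HARNESS_HEADLESS")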

View File

@@ -35,7 +35,7 @@ def VarOperations():
app = rx.App(state=rx.State)
@rx.memo
def memo_comp(list1: list[int], int_var1: int, id: str):
def memo_comp(list1: List[int], int_var1: int, id: str):
return rx.text(list1, int_var1, id=id)
@rx.memo

View File

@@ -1241,7 +1241,10 @@ class EventNamespace(AsyncNamespace):
}
# Get the client IP
client_ip = environ["REMOTE_ADDR"]
try:
client_ip = environ["asgi.scope"]["client"][0]
except (KeyError, IndexError):
client_ip = environ.get("REMOTE_ADDR", "0.0.0.0")
# Process the events.
async for update in process(self.app, event, sid, headers, client_ip):
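
For context, a sketch of the lookup this change performs: the socket.io server exposes the ASGI scope in environ, and scope["client"] is a (host, port) pair, so the first element is the peer address; the fallback keeps the old behaviour. The helper name below is illustrative:

def get_client_ip(environ: dict) -> str:
    # Prefer the ASGI scope's client tuple; fall back to REMOTE_ADDR, then a placeholder.
    try:
        return environ["asgi.scope"]["client"][0]
    except (KeyError, IndexError):
        return environ.get("REMOTE_ADDR", "0.0.0.0")


print(get_client_ip({"asgi.scope": {"client": ("203.0.113.7", 54321)}}))  # 203.0.113.7
print(get_client_ip({}))  # 0.0.0.0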

View File

@@ -5,7 +5,7 @@ from __future__ import annotations
import os
from collections import defaultdict
from pathlib import Path
from typing import Any, Optional
from typing import Any, ClassVar, Optional, Type, Union
import alembic.autogenerate
import alembic.command
@@ -51,6 +51,88 @@ def get_engine(url: str | None = None):
return sqlmodel.create_engine(url, echo=echo_db_query, connect_args=connect_args)
SQLModelOrSqlAlchemy = Union[
Type[sqlmodel.SQLModel], Type[sqlalchemy.orm.DeclarativeBase]
]
class ModelRegistry:
"""Registry for all models."""
models: ClassVar[set[SQLModelOrSqlAlchemy]] = set()
# Cache the metadata to avoid re-creating it.
_metadata: ClassVar[sqlalchemy.MetaData | None] = None
@classmethod
def register(cls, model: SQLModelOrSqlAlchemy):
"""Register a model. Can be used directly or as a decorator.
Args:
model: The model to register.
Returns:
The model passed in as an argument (Allows decorator usage)
"""
cls.models.add(model)
return model
@classmethod
def get_models(cls, include_empty: bool = False) -> set[SQLModelOrSqlAlchemy]:
"""Get registered models.
Args:
include_empty: If True, include models with empty metadata.
Returns:
The registered models.
"""
if include_empty:
return cls.models
return {
model for model in cls.models if not cls._model_metadata_is_empty(model)
}
@staticmethod
def _model_metadata_is_empty(model: SQLModelOrSqlAlchemy) -> bool:
"""Check if the model metadata is empty.
Args:
model: The model to check.
Returns:
True if the model metadata is empty, False otherwise.
"""
return len(model.metadata.tables) == 0
@classmethod
def get_metadata(cls) -> sqlalchemy.MetaData:
"""Get the database metadata.
Returns:
The database metadata.
"""
if cls._metadata is not None:
return cls._metadata
models = cls.get_models(include_empty=False)
if len(models) == 1:
metadata = next(iter(models)).metadata
else:
# Merge the metadata from all the models.
# This allows mixing bare sqlalchemy models with sqlmodel models in one database.
metadata = sqlalchemy.MetaData()
for model in cls.get_models():
for table in model.metadata.tables.values():
table.to_metadata(metadata)
# Cache the metadata
cls._metadata = metadata
return metadata
class Model(Base, sqlmodel.SQLModel):
"""Base class to define a table in the database."""
@@ -113,7 +195,7 @@ class Model(Base, sqlmodel.SQLModel):
def create_all():
"""Create all the tables."""
engine = get_engine()
sqlmodel.SQLModel.metadata.create_all(engine)
ModelRegistry.get_metadata().create_all(engine)
@staticmethod
def get_db_engine():
@@ -224,7 +306,7 @@ class Model(Base, sqlmodel.SQLModel):
) as env:
env.configure(
connection=connection,
target_metadata=sqlmodel.SQLModel.metadata,
target_metadata=ModelRegistry.get_metadata(),
render_item=cls._alembic_render_item,
process_revision_directives=writer, # type: ignore
compare_type=False,
@@ -300,7 +382,6 @@ class Model(Base, sqlmodel.SQLModel):
return True
@classmethod
@property
def select(cls):
"""Select rows from the table.
@@ -310,6 +391,9 @@ class Model(Base, sqlmodel.SQLModel):
return sqlmodel.select(cls)
ModelRegistry.register(Model)
def session(url: str | None = None) -> sqlmodel.Session:
"""Get a session to interact with the database.

View File

@@ -211,7 +211,9 @@ class AppHarness:
# get the source from a function or module object
source_code = "\n".join(
[
"\n".join(f"{k} = {v!r}" for k, v in app_globals.items()),
"\n".join(
self.get_app_global_source(k, v) for k, v in app_globals.items()
),
self._get_source_from_app_source(self.app_source),
]
)
@@ -331,6 +333,24 @@ class AppHarness:
self._wait_frontend()
return self
@staticmethod
def get_app_global_source(key, value):
"""Get the source code of a global object.
If value is a function or class we render the actual
source of value otherwise we assign value to key.
Args:
key: variable name to assign value to.
value: value of the global variable.
Returns:
The rendered app global code.
"""
if not inspect.isclass(value) and not inspect.isfunction(value):
return f"{key} = {value!r}"
return inspect.getsource(value)
def __enter__(self) -> "AppHarness":
"""Contextmanager protocol for `start()`.

View File

@@ -6,7 +6,7 @@ import inspect
import json
import os
import re
from typing import TYPE_CHECKING, Any, List, Union
from typing import TYPE_CHECKING, Any, List, Optional, Union
from reflex import constants
from reflex.utils import exceptions, serializers, types
@@ -603,11 +603,12 @@ def format_query_params(router_data: dict[str, Any]) -> dict[str, str]:
return {k.replace("-", "_"): v for k, v in params.items()}
def format_state(value: Any) -> Any:
def format_state(value: Any, key: Optional[str] = None) -> Any:
"""Recursively format values in the given state.
Args:
value: The state to format.
key: The key associated with the value (optional).
Returns:
The formatted state.
@@ -617,7 +618,7 @@ def format_state(value: Any) -> Any:
"""
# Handle dicts.
if isinstance(value, dict):
return {k: format_state(v) for k, v in value.items()}
return {k: format_state(v, k) for k, v in value.items()}
# Handle lists, sets, tuples.
if isinstance(value, types.StateIterBases):
@@ -632,7 +633,14 @@ def format_state(value: Any) -> Any:
if serialized is not None:
return serialized
raise TypeError(f"No JSON serializer found for var {value} of type {type(value)}.")
if key is None:
raise TypeError(
f"No JSON serializer found for var {value} of type {type(value)}."
)
else:
raise TypeError(
f"No JSON serializer found for State Var '{key}' of value {value} of type {type(value)}."
)
def format_state_name(state_name: str) -> str:
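
Threading the dict key through means unserializable values are now reported with the state var name that holds them. A small sketch of the effect; Opaque is just a stand-in for a value with no registered serializer:

from reflex.utils import format


class Opaque:
    """A value with no JSON serializer registered."""


try:
    format.format_state({"settings": Opaque()})
except TypeError as err:
    # Expected message now names the key, along the lines of:
    # "No JSON serializer found for State Var 'settings' of value ... of type ..."
    print(err)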

View File

@@ -1,11 +1,52 @@
"""Runs the benchmarks and inserts the results into the database."""
from __future__ import annotations
import json
import os
import sys
from datetime import datetime
import pytest
import psycopg2
from helpers import insert_benchmarking_data
def insert_benchmarking_data(
db_connection_url: str,
lighthouse_data: dict,
commit_sha: str,
pr_title: str,
):
"""Insert the benchmarking data into the database.
Args:
db_connection_url: The URL to connect to the database.
lighthouse_data: The Lighthouse data to insert.
commit_sha: The commit SHA to insert.
pr_title: The PR title to insert.
"""
# Serialize the JSON data
lighthouse_json = json.dumps(lighthouse_data)
# Get the current timestamp
current_timestamp = datetime.now()
# Connect to the database and insert the data
with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
insert_query = """
INSERT INTO benchmarks (lighthouse, commit_sha, pr_title, time)
VALUES (%s, %s, %s, %s);
"""
cursor.execute(
insert_query,
(
lighthouse_json,
commit_sha,
pr_title,
current_timestamp,
),
)
# Commit the transaction
conn.commit()
def get_lighthouse_scores(directory_path: str) -> dict:
@@ -44,70 +85,6 @@ def get_lighthouse_scores(directory_path: str) -> dict:
return scores
def run_pytest_and_get_results(test_path=None) -> dict:
"""Runs pytest and returns the results.
Args:
test_path: The path to the tests to run.
Returns:
dict: The results of the tests.
"""
# Set the default path to the current directory if no path is provided
if not test_path:
test_path = os.getcwd()
# Ensure you have installed the pytest-json plugin before running this
pytest_args = ["-v", "--benchmark-json=benchmark_report.json", test_path]
# Run pytest with the specified arguments
pytest.main(pytest_args)
# Print ls of the current directory
print(os.listdir())
with open("benchmark_report.json", "r") as file:
pytest_results = json.load(file)
return pytest_results
def extract_stats_from_json(json_data) -> list[dict]:
"""Extracts the stats from the JSON data and returns them as a list of dictionaries.
Args:
json_data: The JSON data to extract the stats from.
Returns:
list[dict]: The stats for each test.
"""
# Load the JSON data if it is a string, otherwise assume it's already a dictionary
data = json.loads(json_data) if isinstance(json_data, str) else json_data
# Initialize an empty list to store the stats for each test
test_stats = []
# Iterate over each test in the 'benchmarks' list
for test in data.get("benchmarks", []):
stats = test.get("stats", {})
test_name = test.get("name", "Unknown Test")
min_value = stats.get("min", None)
max_value = stats.get("max", None)
mean_value = stats.get("mean", None)
stdev_value = stats.get("stddev", None)
test_stats.append(
{
"test_name": test_name,
"min": min_value,
"max": max_value,
"mean": mean_value,
"stdev": stdev_value,
}
)
return test_stats
def main():
"""Runs the benchmarks and inserts the results into the database."""
# Get the commit SHA and JSON directory from the command line arguments
@@ -121,17 +98,11 @@ def main():
if db_url is None or pr_title is None:
sys.exit("Missing environment variables")
# Run pytest and get the results
results = run_pytest_and_get_results()
cleaned_results = extract_stats_from_json(results)
# Get the Lighthouse scores
lighthouse_scores = get_lighthouse_scores(json_dir)
# Insert the data into the database
insert_benchmarking_data(
db_url, lighthouse_scores, cleaned_results, commit_sha, pr_title
)
insert_benchmarking_data(db_url, lighthouse_scores, commit_sha, pr_title)
if __name__ == "__main__":

View File

@@ -0,0 +1,166 @@
"""Runs the benchmarks and inserts the results into the database."""
from __future__ import annotations
import argparse
import json
from datetime import datetime
import psycopg2
def extract_stats_from_json(json_file: str) -> list[dict]:
"""Extracts the stats from the JSON data and returns them as a list of dictionaries.
Args:
json_file: The JSON file to extract the stats data from.
Returns:
list[dict]: The stats for each test.
"""
with open(json_file, "r") as file:
json_data = json.load(file)
# Load the JSON data if it is a string, otherwise assume it's already a dictionary
data = json.loads(json_data) if isinstance(json_data, str) else json_data
# Initialize an empty list to store the stats for each test
test_stats = []
# Iterate over each test in the 'benchmarks' list
for test in data.get("benchmarks", []):
stats = test.get("stats", {})
test_name = test.get("name", "Unknown Test")
min_value = stats.get("min", None)
max_value = stats.get("max", None)
mean_value = stats.get("mean", None)
stdev_value = stats.get("stddev", None)
test_stats.append(
{
"test_name": test_name,
"min": min_value,
"max": max_value,
"mean": mean_value,
"stdev": stdev_value,
}
)
return test_stats
def insert_benchmarking_data(
db_connection_url: str,
os_type_version: str,
python_version: str,
performance_data: list[dict],
commit_sha: str,
pr_title: str,
branch_name: str,
event_type: str,
actor: str,
):
"""Insert the benchmarking data into the database.
Args:
db_connection_url: The URL to connect to the database.
os_type_version: The OS type and version to insert.
python_version: The Python version to insert.
performance_data: The performance data of reflex web to insert.
commit_sha: The commit SHA to insert.
pr_title: The PR title to insert.
branch_name: The name of the branch.
event_type: Type of github event (push, pull request, etc.).
actor: Username of the user that triggered the run.
"""
# Serialize the JSON data
simple_app_performance_json = json.dumps(performance_data)
# Get the current timestamp
current_timestamp = datetime.now()
# Connect to the database and insert the data
with psycopg2.connect(db_connection_url) as conn, conn.cursor() as cursor:
insert_query = """
INSERT INTO simple_app_benchmarks (os, python_version, commit_sha, time, pr_title, branch_name, event_type, actor, performance)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s);
"""
cursor.execute(
insert_query,
(
os_type_version,
python_version,
commit_sha,
current_timestamp,
pr_title,
branch_name,
event_type,
actor,
simple_app_performance_json,
),
)
# Commit the transaction
conn.commit()
def main():
"""Runs the benchmarks and inserts the results."""
# Get the commit SHA and JSON directory from the command line arguments
parser = argparse.ArgumentParser(description="Run benchmarks and process results.")
parser.add_argument(
"--os", help="The OS type and version to insert into the database."
)
parser.add_argument(
"--python-version", help="The Python version to insert into the database."
)
parser.add_argument(
"--commit-sha", help="The commit SHA to insert into the database."
)
parser.add_argument(
"--benchmark-json",
help="The JSON file containing the benchmark results.",
)
parser.add_argument(
"--db-url",
help="The URL to connect to the database.",
required=True,
)
parser.add_argument(
"--pr-title",
help="The PR title to insert into the database.",
required=True,
)
parser.add_argument(
"--branch-name",
help="The current branch",
required=True,
)
parser.add_argument(
"--event-type",
help="The github event type",
required=True,
)
parser.add_argument(
"--actor",
help="Username of the user that triggered the run.",
required=True,
)
args = parser.parse_args()
# Get the results of pytest benchmarks
cleaned_benchmark_results = extract_stats_from_json(args.benchmark_json)
# Insert the data into the database
insert_benchmarking_data(
db_connection_url=args.db_url,
os_type_version=args.os,
python_version=args.python_version,
performance_data=cleaned_benchmark_results,
commit_sha=args.commit_sha,
pr_title=args.pr_title,
branch_name=args.branch_name,
event_type=args.event_type,
actor=args.actor,
)
if __name__ == "__main__":
main()
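
For a local dry run outside CI, the script can be driven with the same flags the workflow step passes; every value below is a placeholder:

import subprocess
import sys

# Placeholder invocation mirroring the "Upload benchmark results" step above.
subprocess.run(
    [
        sys.executable,
        "scripts/simple_app_benchmark_upload.py",
        "--os", "ubuntu-latest",
        "--python-version", "3.11.5",
        "--commit-sha", "0123abc",
        "--benchmark-json", "benchmarks.json",
        "--db-url", "postgresql://user:pass@localhost:5432/benchmarks",  # placeholder DSN
        "--pr-title", "local test run",
        "--branch-name", "main",
        "--event-type", "push",
        "--actor", "localuser",
    ],
    check=True,
)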