diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index bfbbe60cc..ee4186650 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -3,10 +3,20 @@ name: ci
on:
push:
branches:
- - master
+ - main
+ paths:
+ - 'src/**'
+ - 'test/**'
+ - '.github/workflows/*.yml'
+ - 'pom.xml'
pull_request:
branches:
- - master
+ - main
+ paths:
+ - 'src/**'
+ - 'test/**'
+ - '.github/workflows/*.yml'
+ - 'pom.xml'
jobs:
misc:
@@ -14,38 +24,46 @@ jobs:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Verify
run: mvn -B verify -DskipTests=true
- name: Misc Tests
- run: mvn -B '-Dtest=!sqlancer.dbms.**' test
+ run: mvn -Djacoco.skip=true -B '-Dtest=!sqlancer.dbms.**,!sqlancer.qpg.**' test
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+ - name: Naming Convention Tests
+ run: python src/check_names.py
citus:
name: DBMS Tests (Citus)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Set up Citus
run: |
echo "deb http://apt.postgresql.org/pub/repos/apt/ `lsb_release -cs`-pgdg main" | sudo tee /etc/apt/sources.list.d/pgdg.list
curl https://install.citusdata.com/community/deb.sh | sudo bash
- sudo apt-get -y install postgresql-13-citus-10.1
+ sudo sed -i 's/noble/jammy/g' /etc/apt/sources.list.d/citusdata_community.list # https://github.com/citusdata/citus/issues/7692
+ sudo apt-get update
+ sudo apt-get -y install postgresql-17-citus-13.0
sudo chown -R $USER:$USER /var/run/postgresql
- export PATH=/usr/lib/postgresql/13/bin:$PATH
+ export PATH=/usr/lib/postgresql/17/bin:$PATH
cd ~
mkdir -p citus/coordinator citus/worker1 citus/worker2
initdb -D citus/coordinator
@@ -70,281 +88,628 @@ jobs:
psql -c "SELECT * from citus_add_node('localhost', 9701);" -p 9700 -U $USER -d test
psql -c "SELECT * from citus_add_node('localhost', 9702);" -p 9700 -U $USER -d test
- name: Run Tests
- run: CITUS_AVAILABLE=true mvn -Dtest=TestCitus test
+ run: CITUS_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCitus test
+
+ cnosdb:
+ name: DBMS Tests (CnosDB, creation only)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Set up CnosDB
+ run: |
+ docker pull cnosdb/cnosdb:community-latest
+ docker run --name cnosdb -p 8902:8902 -d cnosdb/cnosdb:community-latest
+ until nc -z 127.0.0.1 8902 2>/dev/null; do sleep 1; done
+ - name: Run Tests
+ run: |
+ CNOSDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCnosDBNoREC test
+ sleep 20
+ CNOSDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCnosDBTLP test
clickhouse:
name: DBMS Tests (ClickHouse)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Set up ClickHouse
run: |
- docker pull yandex/clickhouse-server:latest
- docker run --ulimit nofile=262144:262144 --name clickhouse-server -p8123:8123 -d yandex/clickhouse-server:latest
- sleep 5
+ docker pull clickhouse/clickhouse-server:24.3.1.2672
+ docker run --ulimit nofile=262144:262144 --name clickhouse-server -p8123:8123 -d clickhouse/clickhouse-server:24.3.1.2672
+ until curl -sf http://127.0.0.1:8123/ping 2>/dev/null; do sleep 1; done
- name: Run Tests
- run: CLICKHOUSE_AVAILABLE=true mvn -Dtest=ClickHouseBinaryComparisonOperationTest test
-
+ run: CLICKHOUSE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=ClickHouseBinaryComparisonOperationTest,TestClickHouse,ClickHouseOperatorsVisitorTest,ClickHouseToStringVisitorTest test
+ - name: Show fatal errors
+ run: docker exec clickhouse-server grep Fatal /var/log/clickhouse-server/clickhouse-server.log || echo No Fatal Errors found
+ - name: Teardown ClickHouse server
+ run: |
+ docker stop clickhouse-server
+ docker rm clickhouse-server
cockroachdb:
name: DBMS Tests (CockroachDB)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- fetch-depth: 0
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Set up CockroachDB
+ run: |
+ wget -qO- https://binaries.cockroachdb.com/cockroach-v24.2.0.linux-amd64.tgz | tar xvz
+ cd cockroach-v24.2.0.linux-amd64/ && ./cockroach start-single-node --insecure &
+ until cockroach-v24.2.0.linux-amd64/cockroach sql --insecure -e "SELECT 1" 2>/dev/null; do sleep 2; done
+ - name: Create SQLancer user
+ run: cd cockroach-v24.2.0.linux-amd64/ && ./cockroach sql --insecure -e "CREATE USER sqlancer; GRANT admin to sqlancer" && cd ..
+ - name: Run Tests
+ run: |
+ COCKROACHDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCockroachDBNoREC test
+ COCKROACHDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCockroachDBTLP test
+ COCKROACHDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCockroachDBCERT test
+
+ cockroachdb-qpg:
+ name: QPG Tests (CockroachDB)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Set up CockroachDB
run: |
- wget -qO- https://binaries.cockroachdb.com/cockroach-v21.1.7.linux-amd64.tgz | tar xvz
- cd cockroach-v21.1.7.linux-amd64/ && ./cockroach start-single-node --insecure &
- sleep 10
+ wget -qO- https://binaries.cockroachdb.com/cockroach-v24.2.0.linux-amd64.tgz | tar xvz
+ cd cockroach-v24.2.0.linux-amd64/ && ./cockroach start-single-node --insecure &
+ until cockroach-v24.2.0.linux-amd64/cockroach sql --insecure -e "SELECT 1" 2>/dev/null; do sleep 2; done
- name: Create SQLancer user
- run: cd cockroach-v21.1.7.linux-amd64/ && ./cockroach sql --insecure -e "CREATE USER sqlancer; GRANT admin to sqlancer" && cd ..
+ run: cd cockroach-v24.2.0.linux-amd64/ && ./cockroach sql --insecure -e "CREATE USER sqlancer; GRANT admin to sqlancer" && cd ..
- name: Run Tests
- run: COCKROACHDB_AVAILABLE=true mvn -Dtest=TestCockroachDB test
+ run: COCKROACHDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestCockroachDBQPG test
databend:
name: DBMS Tests (Databend)
runs-on: ubuntu-latest
+ services:
+ databend:
+ image: datafuselabs/databend:v1.2.896-nightly
+ env:
+ QUERY_DEFAULT_USER: sqlancer
+ QUERY_DEFAULT_PASSWORD: sqlancer
+ ports:
+ - 8000:8000
+ - 3307:3307
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- fetch-depth: 0
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Run Tests
+ run: |
+ DATABEND_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDatabendTLP test
+ DATABEND_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDatabendNoREC test
+ DATABEND_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDatabendPQS test
+
+ datafusion:
+ name: DBMS Tests (DataFusion)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Rust
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: stable
+ override: true
+ - name: Build DataFusion Server
+ run: |
+ cd src/sqlancer/datafusion/server/datafusion_server
+ cargo build
+ - name: Start DataFusion Server
+ run: |
+ cd src/sqlancer/datafusion/server/datafusion_server
+ cargo run &
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Set up Databend
+ - name: Wait for DataFusion Server
run: |
- sudo apt update
- sudo apt install mysql-client
- LASTEST_TAG=$(curl -s GET https://api.github.com/repos/datafuselabs/databend/tags\?per_page\=1 | jq -r '.[].name')
- curl -LJO https://github.com/datafuselabs/databend/releases/download/${LASTEST_TAG}/databend-${LASTEST_TAG}-x86_64-unknown-linux-musl.tar.gz
- mkdir ./databend && tar xzvf databend-${LASTEST_TAG}-x86_64-unknown-linux-musl.tar.gz -C ./databend
- ./databend/bin/databend-query &
- - name: Create SQLancer user
- run: mysql -uroot -h127.0.0.1 -P3307 -e "CREATE USER 'sqlancer' IDENTIFIED BY 'sqlancer'; GRANT ALL ON *.* TO sqlancer;"
+ for i in $(seq 1 30); do
+ if nc -z 127.0.0.1 50051 2>/dev/null; then
+ echo "DataFusion server is ready"
+ exit 0
+ fi
+ echo "Waiting for DataFusion server... ($i/30)"
+ sleep 10
+ done
+ echo "DataFusion server failed to start within 300s"
+ exit 1
- name: Run Tests
run: |
- DATABEND_AVAILABLE=true mvn -Dtest=TestDatabend test
+ DATAFUSION_AVAILABLE=true mvn -Djacoco.skip=true test -Pdatafusion-tests
duckdb:
name: DBMS Tests (DuckDB)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build
run: mvn -B package -DskipTests=true
- name: DuckDB Tests
- run: mvn -Dtest=TestDuckDB test
+ run: |
+ mvn -Djacoco.skip=true -Dtest=TestDuckDBTLP test
+ mvn -Djacoco.skip=true -Dtest=TestDuckDBNoREC test
h2:
name: DBMS Tests (H2)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Run Tests
+ run: mvn -Djacoco.skip=true -Dtest=TestH2 test
+
+ hive:
+ name: DBMS Tests (Hive)
+ runs-on: ubuntu-latest
+ services:
+ metastore:
+ image: apache/hive:4.0.1
+ env:
+ SERVICE_NAME: 'metastore'
+ ports:
+ - 9083:9083
+ volumes:
+ - warehouse:/opt/hive/data/warehouse
+ hiveserver2:
+ image: apache/hive:4.0.1
+ env:
+ SERVICE_NAME: 'hiveserver2'
+ ports:
+ - 10000:10000
+ - 10002:10002
+ volumes:
+ - warehouse:/opt/hive/data/warehouse
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Run Tests
+ run: HIVE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestHiveTLP test
+
+ spark:
+ name: DBMS Tests (Spark)
+ runs-on: ubuntu-latest
+
+ services:
+ spark:
+ image: apache/spark:3.5.1
+ ports:
+ - 10000:10000
+
+ command: >-
+ /opt/spark/bin/spark-submit
+ --class org.apache.spark.sql.hive.thriftserver.HiveThriftServer2
+ --name "Thrift JDBC/ODBC Server"
+ --master local[*]
+ --driver-memory 4g
+ --conf spark.hive.server2.thrift.port=10000
+ --conf spark.sql.warehouse.dir=/tmp/spark-warehouse
+ spark-internal
+
+ steps:
+ - uses: actions/checkout@v3
with:
fetch-depth: 0
+
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v3
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+
- name: Build SQLancer
run: mvn -B package -DskipTests=true
+
- name: Run Tests
- run: H2_AVAILABLE=true mvn -Dtest=TestH2 test
+ run: SPARK_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestSparkTLP test
+
+ hsqldb:
+ name: DBMS Tests (HSQLDB)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Run Tests
+ run: |
+ mvn -Djacoco.skip=true -Dtest=TestHSQLDBNoREC test
+ mvn -Djacoco.skip=true -Dtest=TestHSQLDBTLP test
mariadb:
name: DBMS Tests (MariaDB)
- runs-on: ubuntu-18.04
+ runs-on: ubuntu-latest
+ services:
+ mysql:
+ image: mariadb:11.7.2
+ env:
+ MYSQL_ROOT_PASSWORD: root
+ ports:
+ - 3306:3306
+ options: --health-cmd="healthcheck.sh --connect --innodb_initialized" --health-interval=10s --health-timeout=5s --health-retries=10
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- fetch-depth: 0
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Create SQLancer User
+ run: sudo mysql -h 127.0.0.1 -uroot -proot -e "CREATE USER 'sqlancer'@'%' IDENTIFIED BY 'sqlancer'; GRANT ALL PRIVILEGES ON * . * TO 'sqlancer'@'%';"
+ - name: Run Tests
+ run: MARIADB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestMariaDB test
+
+ materialize:
+ name: DBMS Tests (Materialize)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Materialize
+ run: |
+ docker pull materialize/materialized:latest
+ docker run -d -p6875:6875 -p6877:6877 materialize/materialized:latest
+ until pg_isready -h localhost -p 6875 -U materialize; do sleep 1; done
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Install MariaDB
+ - name: Run Tests
run: |
- sudo apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74CD1D8
- sudo add-apt-repository 'deb [arch=amd64,arm64,ppc64el] http://sfo1.mirrors.digitalocean.com/mariadb/repo/10.3/ubuntu bionic main'
- sudo apt update
- sudo apt install mariadb-server
- sudo systemctl start mariadb
- - name: Create SQLancer User
- run: sudo mysql -uroot -proot -e "CREATE USER 'sqlancer'@'localhost' IDENTIFIED BY 'sqlancer'; GRANT ALL PRIVILEGES ON * . * TO 'sqlancer'@'localhost';"
+ MATERIALIZE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMaterializeNoREC
+ MATERIALIZE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMaterializeTLP
+ MATERIALIZE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMaterializePQS
+
+ materialize-qpg:
+ name: QPG Tests (Materialize)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Materialize
+ run: |
+ docker pull materialize/materialized:latest
+ docker run -d -p6875:6875 -p6877:6877 materialize/materialized:latest
+ until pg_isready -h localhost -p 6875 -U materialize; do sleep 1; done
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
- name: Run Tests
- run: MARIADB_AVAILABLE=true mvn -Dtest=TestMariaDB test
+ run: |
+ MATERIALIZE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMaterializeQPG
+ MATERIALIZE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMaterializeQueryPlan
mysql:
- name: DBMS Tests (MySQL)
- runs-on: ubuntu-18.04
+ name: DBMS Tests (MySQL, CERT creation only)
+ runs-on: ubuntu-latest
+ services:
+ mysql:
+ image: mysql:8.4
+ env:
+ MYSQL_ROOT_PASSWORD: root
+ ports:
+ - 3306:3306
+ options: --health-cmd="mysqladmin ping" --health-interval=10s --health-timeout=5s --health-retries=10
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Set up MySQL
- run: |
- sudo apt-get install libssl-dev libmecab2 libjson-perl mecab-ipadic-utf8
- sudo apt-get remove mysql-*
- wget -q https://dev.mysql.com/get/Downloads/MySQL-8.0/mysql-server_8.0.20-1ubuntu18.04_amd64.deb-bundle.tar
- tar -xvf mysql-server_8.0.20-1ubuntu18.04_amd64.deb-bundle.tar
- sudo dpkg -i *.deb
- sudo systemctl start mysql
- name: Create SQLancer user
- run: mysql -uroot -proot -e "CREATE USER 'sqlancer'@'localhost' IDENTIFIED BY 'sqlancer'; GRANT ALL PRIVILEGES ON * . * TO 'sqlancer'@'localhost';"
+ run: mysql -h 127.0.0.1 -uroot -proot -e "CREATE USER 'sqlancer'@'%' IDENTIFIED BY 'sqlancer'; GRANT ALL PRIVILEGES ON * . * TO 'sqlancer'@'%';"
- name: Run Tests
run: |
- MYSQL_AVAILABLE=true mvn test -Dtest=TestMySQLPQS
- MYSQL_AVAILABLE=true mvn test -Dtest=TestMySQLTLP
+ MYSQL_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMySQLPQS
+ MYSQL_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMySQLTLP
+ MYSQL_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMySQLCERT
+ MYSQL_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestMySQLDQE
+ oceanbase:
+ name: DBMS Tests (OceanBase)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Set up OceanBase
+ run: |
+ docker run -p 2881:2881 --name oceanbase-ce -e MODE=mini -d oceanbase/oceanbase-ce:4.2.1-lts
+ until mysql -h127.1 -uroot@test -P2881 --connect-timeout=3 -Doceanbase -A -e "SELECT 1" 2>/dev/null; do sleep 5; done
+ mysql -h127.1 -uroot@test -P2881 -Doceanbase -A -e"CREATE USER 'sqlancer'@'%' IDENTIFIED BY 'sqlancer'; GRANT ALL PRIVILEGES ON * . * TO 'sqlancer'@'%';"
+ - name: Run Tests
+ run: |
+ OCEANBASE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestOceanBaseNoREC
+ OCEANBASE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestOceanBasePQS
+ OCEANBASE_AVAILABLE=true mvn -Djacoco.skip=true test -Dtest=TestOceanBaseTLP
postgres:
name: DBMS Tests (PostgreSQL)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up PostgreSQL
- uses: harmon758/postgresql-action@v1
+ uses: harmon758/postgresql-action@v1.0.0
with:
- postgresql version: '12'
+ postgresql version: '18'
postgresql user: 'sqlancer'
postgresql password: 'sqlancer'
postgresql db: 'test'
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Run Tests
- run: POSTGRES_AVAILABLE=true mvn -Dtest=TestPostgres test
+ run: |
+ POSTGRES_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPostgresPQS test
+ POSTGRES_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPostgresTLP test
+ POSTGRES_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPostgresNoREC test
+ POSTGRES_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPostgresCERT test
+ presto:
+ name: DBMS Tests (Presto)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Set up Presto
+ run: |
+ docker pull prestodb/presto:latest
+ echo "connector.name=memory" >> memory.properties
+ docker run -p 8080:8080 -d -v ./memory.properties:/opt/presto-server/etc/catalog/memory.properties --name presto prestodb/presto:latest
+ until curl -sf http://127.0.0.1:8080/v1/info 2>/dev/null; do sleep 2; done
+ - name: Build SQLancer
+ run: mvn -B package -DskipTests=true
+ - name: Run Tests
+ run: |
+ PRESTO_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPrestoNoREC test
+ docker restart presto && until curl -sf http://127.0.0.1:8080/v1/info 2>/dev/null; do sleep 2; done
+ PRESTO_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestPrestoTLP test
sqlite:
name: DBMS Tests (SQLite)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build
run: mvn -B package -DskipTests=true
- name: SQLite Tests
- run: |
- mvn -Dtest=TestSQLitePQS test
- mvn -Dtest=TestSQLite3 test
+ run: |
+ mvn -Djacoco.skip=true -Dtest=TestSQLitePQS test
+ mvn -Djacoco.skip=true -Dtest=TestSQLiteTLP test
+ mvn -Djacoco.skip=true -Dtest=TestSQLiteNoREC test
+ mvn -Djacoco.skip=true -Dtest=TestSQLiteCODDTest test
+
+ sqlite-qpg:
+ name: QPG Tests (SQLite)
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
+ with:
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build
+ run: mvn -B package -DskipTests=true
+ - name: SQLite Tests for QPG
+ run: |
+ mvn -Djacoco.skip=true -Dtest=TestSQLiteQPG test
tidb:
- name: DBMS Tests (TiDB)
+ name: DBMS Tests (TiDB, TLP creation only)
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
+ - uses: actions/checkout@v4
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v4
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Set up TiDB
run: |
- docker pull pingcap/tidb:latest
- docker run --name tidb-server -d -p 4000:4000 pingcap/tidb:latest
- sleep 10
+ docker pull hawkingrei/tidb-playground:nightly-2025-09-16
+ docker run --name tidb-server -d -p 4000:4000 hawkingrei/tidb-playground:nightly-2025-09-16
+ until mysql -h 127.0.0.1 -P 4000 -u root --connect-timeout=3 -e "SELECT 1" 2>/dev/null; do sleep 3; done
- name: Create SQLancer user
- run: sudo mysql -h 127.0.0.1 -P 4000 -u root -D test -e "CREATE USER 'sqlancer'@'%' IDENTIFIED WITH mysql_native_password BY 'sqlancer'; GRANT ALL PRIVILEGES ON *.* TO 'sqlancer'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;"
+ run: mysql -h 127.0.0.1 -P 4000 -u root -D test -e "CREATE USER 'sqlancer'@'%' IDENTIFIED WITH mysql_native_password BY 'sqlancer'; GRANT ALL PRIVILEGES ON *.* TO 'sqlancer'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;"
- name: Run Tests
- run: TIDB_AVAILABLE=true mvn -Dtest=TestTiDB test
+ run: |
+ TIDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestTiDBTLP test
+ TIDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestTiDBCERT test
- java13:
- name: Java 13 Compatibility (DuckDB)
+ tidb-qpg:
+ name: QPG Tests (TiDB)
runs-on: ubuntu-latest
-
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - name: Set up JDK 13
- uses: actions/setup-java@v1
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- java-version: 13
- - name: Build
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Shortly run DuckDB
- run: cd target && java -jar $(ls | grep -P 'sqlancer-[0-9.]*.jar') --num-threads 4 --timeout-seconds 30 --num-queries 0 duckdb
+ - name: Set up TiDB
+ run: |
+ docker pull hawkingrei/tidb-playground:nightly-2025-09-16
+ docker run --name tidb-server -d -p 4000:4000 hawkingrei/tidb-playground:nightly-2025-09-16
+ until mysql -h 127.0.0.1 -P 4000 -u root --connect-timeout=3 -e "SELECT 1" 2>/dev/null; do sleep 3; done
+ - name: Create SQLancer user
+ run: mysql -h 127.0.0.1 -P 4000 -u root -D test -e "CREATE USER 'sqlancer'@'%' IDENTIFIED WITH mysql_native_password BY 'sqlancer'; GRANT ALL PRIVILEGES ON *.* TO 'sqlancer'@'%' WITH GRANT OPTION; FLUSH PRIVILEGES;"
+ - name: Run Tests
+ run: TIDB_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestTiDBQPG test
- java14:
- name: Java 14 Compatibility (DuckDB)
+ yugabyte:
+ name: DBMS Tests (YugabyteDB)
runs-on: ubuntu-latest
-
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - name: Set up JDK 14
- uses: actions/setup-java@v1
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- java-version: 14
- - name: Build
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Shortly run DuckDB
- run: cd target && java -jar $(ls | grep -P 'sqlancer-[0-9.]*.jar') --num-threads 4 --timeout-seconds 30 --num-queries 0 duckdb
-
+ - name: Set up Yugabyte
+ run: |
+ docker pull yugabytedb/yugabyte:latest
+ docker run -d --name yugabyte -p7000:7000 -p9000:9000 -p5433:5433 -p9042:9042 yugabytedb/yugabyte:latest bin/yugabyted start --daemon=false
+ until pg_isready -h localhost -p 5433; do sleep 1; done
+ - name: Run Tests
+ run: |
+ YUGABYTE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestYSQLNoREC test
+ YUGABYTE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestYSQLTLP test
+ YUGABYTE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestYSQLPQS test
+ YUGABYTE_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestYCQL test
- java15:
- name: Java 15 EA Compatibility (DuckDB)
+ doris:
+ name: DBMS Tests (Apache Doris)
runs-on: ubuntu-latest
-
steps:
- - uses: actions/checkout@v2
- with:
- fetch-depth: 0
- - name: Set up JDK 15
- uses: actions/setup-java@v1
+ - uses: actions/checkout@v4
+ - name: Set up JDK 11
+ uses: actions/setup-java@v4
with:
- java-version: 15-ea
- - name: Build
+ distribution: 'temurin'
+ java-version: '11'
+ cache: 'maven'
+ - name: install mysql client
+ run: |
+ sudo apt update
+ sudo apt install mysql-client --assume-yes
+ - name: Set up Apache Doris
+ run: |
+ sudo sysctl -w vm.max_map_count=2000000
+ wget -q https://apache-doris-releases.oss-accelerate.aliyuncs.com/apache-doris-2.1.4-bin-x64.tar.gz
+ tar zxf apache-doris-2.1.4-bin-x64.tar.gz
+ mv apache-doris-2.1.4-bin-x64 apache-doris
+ sudo swapoff -a
+ cd apache-doris/fe
+ ./bin/start_fe.sh --daemon
+ cd ../be
+ ./bin/start_be.sh --daemon
+
+ until mysql -u root -h 127.0.0.1 --port 9030 --connect-timeout=3 -e "SELECT 1" 2>/dev/null; do sleep 3; done
+ IP=$(hostname -I | awk '{print $1}')
+ mysql -u root -h 127.0.0.1 --port 9030 -e "ALTER SYSTEM ADD BACKEND '${IP}:9050';"
+ mysql -u root -h 127.0.0.1 --port 9030 -e "CREATE USER 'sqlancer' IDENTIFIED BY 'sqlancer'; GRANT ALL ON *.* TO sqlancer;"
+ - name: Build SQLancer
run: mvn -B package -DskipTests=true
- - name: Shortly run DuckDB
- run: cd target && java -jar $(ls | grep -P 'sqlancer-[0-9.]*.jar') --num-threads 4 --timeout-seconds 30 --num-queries 0 duckdb
+ - name: Run Tests
+ run: |
+ DORIS_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDorisNoREC test
+ DORIS_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDorisPQS test
+ DORIS_AVAILABLE=true mvn -Djacoco.skip=true -Dtest=TestDorisTLP test
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 342a358f3..6194c1529 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,9 +9,10 @@ jobs:
steps:
- uses: actions/checkout@v2
- name: Set up Maven Central Repository
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v3
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
server-id: ossrh
server-username: MAVEN_USERNAME
server-password: MAVEN_PASSWORD
@@ -29,9 +30,10 @@ jobs:
- name: Check out the repo
uses: actions/checkout@v2
- name: Set up JDK 11
- uses: actions/setup-java@v1
+ uses: actions/setup-java@v3
with:
- java-version: 11
+ distribution: 'temurin'
+ java-version: '11'
- name: Build SQLancer
run: mvn -B package -DskipTests=true
- name: Push to Docker Hub
diff --git a/.gitignore b/.gitignore
index 48efcf83a..d7cbeb55f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,8 +1,7 @@
target/
.classpath
-.settings/org.eclipse.core.resources.prefs
-.settings/org.eclipse.m2e.core.prefs
-.settings/org.eclipse.jdt.core.prefs
+.settings/
+.vscode
.project
.checkstyle
*.DS_Store
@@ -11,3 +10,7 @@ SQLancer.iml
dependency-reduced-pom.xml
database0.db
databaseconnectiontest.db
+database*.log
+database*.properties
+database*.script
+databases/
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 81177e352..ea5baea1c 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -1,6 +1,6 @@
# Development
-## Working with Eclipse
+## Working with Eclipse [[Video Guide]](https://www.youtube.com/watch?v=KsuGrOLKb9Q)
Developing SQLancer using Eclipse is expected to work well. You can import SQLancer with a single step:
@@ -12,7 +12,23 @@ If you do not find an option to import Maven projects, you might need to install
## Implementing Support for a New DBMS
-The DuckDB implementation provides a good template for a new implementation. The `DuckDBProvider` class is the central class that manages the creation of the databases and executes the selected test oracles. Try to copy its structure for the new DBMS that you want to implement, and start by generate databases (without implementing a test oracle). As part of this, you will also need to implement the equivalent of `DuckDBSchema`, which represents the database schema of the generated database. After you can successfully generate databases, the next step is to generate one of the test oracles. For example, you might want to implement NoREC (see `DuckDBNoRECOracle` or `DuckDBQueryPartitioningWhereTester` for TLP). As part of this, you must also implement a random expression generator (see `DuckDBExpressionGenerator`) and a visitor to derive the textual representation of an expression (see `DuckDBToStringVisitor`).
+The DuckDB implementation provides a good template for a new implementation. The `DuckDBProvider` class is the central class that manages the creation of the databases and executes the selected test oracles. Try to copy its structure for the new DBMS that you want to implement, and start by generating databases (without implementing a test oracle). As part of this, you will also need to implement the equivalent of `DuckDBSchema`, which represents the database schema of the generated database. After you can successfully generate databases, the next step is to generate one of the test oracles. For example, you might want to implement NoREC (see enum value `NOREC` in `DuckDBOracleFactory`). As part of this, you must also implement a random expression generator (see `DuckDBExpressionGenerator`) and a visitor to derive the textual representation of an expression (see `DuckDBToStringVisitor`).
+
+Please consider the following suggestions when creating a PR to contribute a new DBMS:
+* Ensure that `mvn verify -DskipTests=true` does not result in style violations.
+* Add a [CI test](https://github.com/sqlancer/sqlancer/blob/master/.github/workflows/main.yml) to ensure that future changes to SQLancer are unlikely to break the newly-supported DBMS. It is reasonable to do this in a follow-up PR—please indicate whether you plan to do so in the PR description.
+* Add the DBMS' name to the [check_names.py](https://github.com/sqlancer/sqlancer/blob/master/src/check_names.py) script, which ensures adherence to a common prefix in the Java classes.
+* Add the DBMS' name to the [README.md](https://github.com/sqlancer/sqlancer/blob/master/README.md#supported-dbms) file.
+* It is easier to review multiple smaller PRs than one PR that contains the complete implementation. Consider contributing parts of your implementation incrementally as you work on them.
+
+### Expected Errors
+
+Most statements have an [ExpectedErrors](https://github.com/sqlancer/sqlancer/blob/aa0c0eccba4eefa75bfd518f608c9222c692c11d/src/sqlancer/common/query/ExpectedErrors.java) object associated with them. This object essentially contains a list of errors, one of which the database system might return if it cannot successfully execute the statement. These errors are typically added through a trial-and-error process while considering various tradeoffs. For example, consider the [DuckDBInsertGenerator](https://github.com/sqlancer/sqlancer/blob/aa0c0eccba4eefa75bfd518f608c9222c692c11d/src/sqlancer/duckdb/gen/DuckDBInsertGenerator.java#L38) class, whose expected errors are specified in [DuckDBErrors](https://github.com/sqlancer/sqlancer/blob/aa0c0eccba4eefa75bfd518f608c9222c692c11d/src/sqlancer/duckdb/DuckDBErrors.java#L90). When implementing such a generator, the list of expected errors might first be empty. When running the generator for the first time, you might receive an error such as "create unique index, table contains duplicate data", indicating that creating the index failed due to duplicate data. In principle, this error could be avoided by first checking whether the column contains any duplicate values. However, checking this would be expensive and error-prone (e.g., consider string similarity, which might depend on collations); thus, the obvious choice would be to add this string to the list of expected errors, and run the generator again to check for any other expected errors. In other cases, errors might be best addressed through improvements in the generators. For example, it is typically straightforward to generate syntactically-valid statements, which is why syntax errors should not be ignored. This approach is effective in uncovering internal errors; rather than ignoring them as an expected error, report them, and see [Unfixed Bugs](#unfixed-bugs) below.
+
+### Bailing Out While Generating a Statement
+
+In some cases, it might be undesirable or even impossible to generate a specific statement type. For example, consider that SQLancer tries to execute a `DROP TABLE` statement (e.g., see [TiDBDropTableGenerator](https://github.com/sqlancer/sqlancer/blob/30948f34acc2354d6be18a70bdeeebff1e73fa48/src/sqlancer/tidb/gen/TiDBDropTableGenerator.java)), but the database contains only a single table. Dropping the table would cause all subsequent attempts to insert data or query it to fail. Thus, in such a case, it might be more efficient to "bail out" by abandoning the current attempt to generate the statement. This can be achieved by throwing an `IgnoreMeException`. Unlike for other exceptions, SQLancer silently continues execution rather than reporting this exception to the user.
+
### Typed vs. Untyped Expression Generation
@@ -30,6 +46,47 @@ For a permissive DBMS, implementing the expression generator is easier, since th
For a strict DBMS, the better approach is typically to attempt to generate expressions of the expected type. For PostgreSQL, the expression generator thus expects an additional type argument (see [PostgreSQLExpressionGenerator](https://github.com/sqlancer/sqlancer/blob/86647df8aa2dd8d167b5c3ce3297290f5b0b2bcd/src/sqlancer/postgres/gen/PostgresExpressionGenerator.java#L251)). This type is propagated recursively. For example, if we require a predicate for the `WHERE` clause, we pass boolean as a type. The expression generator then calls a method `generateBooleanExpression` that attempts to produce a boolean expression, by, for example, generating a comparison (e.g., `<=`). For the comparison's operands, a random type is then selected and propagated. For example, if an integer type is selected, then `generateExpression` is called with this type once for the left operand, and once for the right operand. Note that this process does not guarantee that the expression will indeed have the expected type. It might happen, for example, that the expression generator attempts to produce an integer value, but that it produces a double value instead, namely when an integer overflow occurs, which, depending on the DBMS, implicitly converts the result to a floating-point value.
+#### Supported DBMS
+
+Since SQL dialects differ widely, each DBMS to be tested requires a separate implementation.
+
+| DBMS | Status | Expression Generation | Description |
+| ---------------------------- | ----------- | ---------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| SQLite                       | Working     | Untyped                      | This implementation is currently affected by a significant performance regression that still needs to be investigated.                                                                           |
+| MySQL | Working | Untyped | Running this implementation likely uncovers additional, unreported bugs. |
+| PostgreSQL | Working | Typed | |
+| Citus (PostgreSQL Extension) | Working | Typed | This implementation extends the PostgreSQL implementation of SQLancer, and was contributed by the Citus team. |
+| MariaDB | Preliminary | Untyped | The implementation of this DBMS is very preliminary, since we stopped extending it after all but one of our bug reports were addressed. Running it likely uncovers additional, unreported bugs. |
+| CockroachDB | Working | Typed | |
+| TiDB | Working | Untyped | |
+| DuckDB | Working | Untyped, Generic | |
+| ClickHouse | Preliminary | Untyped, Generic | Implementing the different table engines was not convenient, which is why only a very preliminary implementation exists. |
+| TDEngine | Removed | Untyped | We removed the TDEngine implementation since all but one of our bug reports were still unaddressed five months after we reported them. |
+| OceanBase | Working | Untyped | |
+| YugabyteDB | Working | Typed (YSQL), Untyped (YCQL) | YSQL implementation based on Postgres code. YCQL implementation is primitive for now and uses Cassandra JDBC driver as a proxy interface. |
+| Databend | Working | Typed | |
+| QuestDB | Working | Untyped, Generic | The implementation of QuestDB is still WIP, current version covers very basic data types, operations and SQL keywords. |
+| CnosDB | Working | Typed | The implementation of CnosDB currently uses Restful API. |
+| Materialize | Working | Typed | |
+| Apache Doris | Preliminary | Typed | This is a preliminary implementation, which only contains the common logic of Doris. We have found some errors through it, and hope to improve it in the future. |
+| Presto | Preliminary | Typed | This is a preliminary implementation, only basic types supported. |
+| DataFusion | Preliminary | Typed | Only basic SQL features are supported. |
+
+#### Previously Supported DBMS
+
+Some DBMS were once supported but subsequently removed.
+
+| DBMS | Pull Request | Description |
+| ---------- | ----------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| ArangoDB | [#915](https://github.com/sqlancer/sqlancer/pull/915) | This implementation was removed because ArangoDB is a NoSQL DBMS, while the majority were SQL DBMSs, which resulted in difficulty refactoring SQLancer. |
+| Cosmos | [#915](https://github.com/sqlancer/sqlancer/pull/915) | This implementation was removed because Cosmos is a NoSQL DBMS, while the majority were SQL DBMSs, which resulted in difficulty refactoring SQLancer. |
+| MongoDB | [#915](https://github.com/sqlancer/sqlancer/pull/915) | This implementation was removed because MongoDB is a NoSQL DBMS, while the majority were SQL DBMSs, which resulted in difficulty refactoring SQLancer. |
+| StoneDB    | [#963](https://github.com/sqlancer/sqlancer/pull/963) | This implementation was removed because development of StoneDB stopped. |
+
+### Unfixed Bugs
+
+Often, some bugs are fixed only after an extended period, meaning that SQLancer will repeatedly report the same bug. In such cases, it might be possible to avoid generating the problematic pattern, or to add an expected error with the internal error message. Rather than, for example, commenting out the code with the bug-inducing pattern, a pattern implemented by the [TiDBBugs class](https://github.com/sqlancer/sqlancer/blob/4c20a94b3ad2c037e1a66c0b637184f8c20faa7e/src/sqlancer/tidb/TiDBBugs.java) should be applied. The core idea is to use a public, static flag for each issue, which is set to true as long as the issue persists (e.g., see [bug35652](https://github.com/sqlancer/sqlancer/blob/4c20a94b3ad2c037e1a66c0b637184f8c20faa7e/src/sqlancer/tidb/TiDBBugs.java#L55)). The work-around code is then executed—or the problematic pattern should not be generated—if the flag is set to true (e.g., [an expected error is added for bug35652](https://github.com/sqlancer/sqlancer/blob/59564d818d991d54b32fa5a79c9f733799c090f2/src/sqlancer/tidb/TiDBErrors.java#L47)). This makes it easy to later on identify and remove all such work-around code once the issue has been fixed.
+
## Options
SQLancer uses [JCommander](https://jcommander.org/) for handling options. The `MainOptions` class contains options that are expected to be supported by all DBMS-testing implementations. Furthermore, each `*Provider` class provides a method to return an additional set of supported options.
@@ -50,12 +107,12 @@ You can run them using the following command:
mvn verify
```
-We use [Travis-CI](https://travis-ci.com/) to automatically check PRs.
+We use [GitHub Actions](https://github.com/sqlancer/sqlancer/blob/master/.github/workflows/main.yml) to automatically check PRs.
## Testing
-As part of the Travis-CI gate, we use smoke testing by running SQLancer on each supported DBMS for some minutes, to test that nothing is obviously broken. For DBMS for which all bugs have been fixed, we verify that SQLancer cannot find any further bugs (i.e., the return code is zero).
+As part of the GitHub Actions check, we use smoke testing by running SQLancer on each supported DBMS for some minutes, to test that nothing is obviously broken. For DBMS for which all bugs have been fixed, we verify that SQLancer cannot find any further bugs (i.e., the return code is zero).
In addition, we use [unit tests](https://github.com/sqlancer/sqlancer/tree/master/test/sqlancer) to test SQLancer's core functionality, such as random string and number generation as well as option passing. When fixing a bug, add a unit test, if it is easily possible.
@@ -65,13 +122,13 @@ You can run the tests using the following command:
mvn test
```
-Note that per default, the smoke testing is performed only for embedded DBMS (i.e., DuckDB and SQLite). To run smoke tests also for the other DBMS, you need to set environment variables. For example, you can run the MySQL smoke testing (and no other tests) using the following command:
+Note that by default, the smoke testing is performed only for embedded DBMS (e.g., DuckDB and SQLite). To run smoke tests also for the other DBMS, you need to set environment variables. For example, you can run the MySQL smoke testing (and no other tests) using the following command:
```
MYSQL_AVAILABLE=true mvn -Dtest=TestMySQL test
```
-For up-to-date testing commands, check out the `.travis.yml` file.
+For up-to-date testing commands, check out the `.github/workflows/main.yml` file.
## Reviewing
@@ -88,4 +145,5 @@ Please pay attention to good commit messages (in particular subject lines). As b
2. Do not end the subject line with a period. For example, write "Refactor the handling of indexes" rather than "Refactor the handling of indexes.".
3. Use the imperative mood in the subject line. For example, write "Refactor the handling of indexes" rather than "Refactoring" or "Refactor**ed** the handling of indexes".
-Please also pay attention to a clean commit history. Rather than merging with the main branch, use `git rebase` to rebase your commits on the main branch. Sometimes, it might happen that you discover an issue only after having already created a commit, for example, when an issue is found by `mvn verify` in the Travis CI. Do not introduce a separate commit for such issues. If the issue was introduced by the last commit, you can fix the issue, and use `git commit --amend` to change the latest commit. If the change was introduced by one of the previous commits, you can use `git rebase -i` to change the respective commit. If you already have a number of such commits, you can use `git squash` to "collapse" multiple commits into one. For more information, you might want to read [How (and Why!) to Keep Your Git Commit History Clean](https://about.gitlab.com/blog/2018/06/07/keeping-git-commit-history-clean/) written by Kushal Pandya.
+Please also pay attention to a clean commit history. Rather than merging with the main branch, use `git rebase` to rebase your commits on the main branch. Sometimes, it might happen that you discover an issue only after having already created a commit, for example, when an issue is found by `mvn verify` in the CI checks. Do not introduce a separate commit for such issues. If the issue was introduced by the last commit, you can fix the issue, and use `git commit --amend` to change the latest commit. If the change was introduced by one of the previous commits, you can use `git rebase -i` to change the respective commit. If you already have a number of such commits, you can use `git squash` to "collapse" multiple commits into one. For more information, you might want to read [How (and Why!) to Keep Your Git Commit History Clean](https://about.gitlab.com/blog/2018/06/07/keeping-git-commit-history-clean/) written by Kushal Pandya.
+
diff --git a/README.md b/README.md
index 2209e7589..134f47666 100644
--- a/README.md
+++ b/README.md
@@ -1,25 +1,23 @@
[](https://github.com/sqlancer/sqlancer/actions)
-[](https://twitter.com/sqlancer_dbms)
-# SQLancer

-SQLancer (Synthesized Query Lancer) is a tool to automatically test Database Management Systems (DBMS) in order to find logic bugs in their implementation. We refer to logic bugs as those bugs that cause the DBMS to fetch an incorrect result set (e.g., by omitting a record).
+SQLancer is a tool to automatically test Database Management Systems (DBMSs) in order to find bugs in their implementation. That is, it finds bugs in the code of the DBMS implementation, rather than in queries written by the user. SQLancer has found hundreds of bugs in mature and widely-known DBMSs.
-SQLancer operates in the following two phases:
+SQLancer tackles two essential challenges when automatically testing DBMSs:
+1. **Test input generation**: SQLancer implements approaches for automatically generating SQL statements. It contains various hand-written SQL generators that operate in multiple phases. First, a database schema is created, which refers to a set of tables and their columns. Then, data is inserted into these tables, along with creating various other kinds of database states such as indexes, views, or database-specific options. Finally, queries are generated, which can be validated using one of multiple result validators (also called *test oracles*) that SQLancer provides. Besides the standard approach of creating the statements in an unguided way, SQLancer also supports a test input-generation approach that is feedback-guided and aims to exercise as many unique query plans as possible based on the intuition that doing so would exercise many interesting behaviors in the database system [[ICSE '23]](https://arxiv.org/pdf/2312.17510).
+2. **Test oracles**: A key innovation in SQLancer is that it provides ways to find deep kinds of bugs in DBMSs. As a main focus, it can find logic bugs, which are bugs that cause the DBMS to fetch an incorrect result set (e.g., by omitting a record). We have proposed multiple complementary test oracles such as *Ternary Logic Partitioning (TLP)* [[OOPSLA '20]](https://dl.acm.org/doi/pdf/10.1145/3428279), *Non-optimizing Reference Engine Construction (NoREC)* [[ESEC/FSE 2020]](https://arxiv.org/abs/2007.08292), *Pivoted Query Synthesis (PQS)* [[OSDI '20]](https://www.usenix.org/system/files/osdi20-rigger.pdf), *Differential Query Plans (DQP)* [[SIGMOD '24]](https://dl.acm.org/doi/pdf/10.1145/3654991), and *Constant Optimization Driven Database System Testing (CODDTest)* [SIGMOD '25]. It can also find specific categories of performance issues, which refer to cases where a DBMS could reasonably be expected to produce its result more efficiently using a technique called *Cardinality Estimation Restriction Testing (CERT)* [[ICSE '24]](https://arxiv.org/pdf/2306.00355). SQLancer can detect unexpected internal errors (e.g., an error that the database is corrupted) by declaring all potential errors that might be returned by a DBMS for a given query. Finally, SQLancer can find crash bugs, which are bugs that cause the DBMS process to terminate. For this, it uses an implicit test oracle.
-1. Database generation: The goal of this phase is to create a populated database, and stress the DBMS to increase the probability of causing an inconsistent database state that could be detected subsequently. First, random tables are created. Then, randomly SQL statements are chosen to generate, modify, and delete data. Also other statements, such as those to create indexes as well as views and to set DBMS-specific options are sent to the DBMS.
-2. Testing: The goal of this phase is to detect the logic bugs based on the generated database. See Testing Approaches below.
+**Community.** We have a [Slack workspace](https://join.slack.com/t/sqlancer/shared_invite/zt-eozrcao4-ieG29w1LNaBDMF7OB_~ACg) to discuss SQLancer, and DBMS testing in general. Previously, SQLancer had an account on Twitter/X [@sqlancer_dbms](https://twitter.com/sqlancer_dbms), which is no longer maintained. We have a [blog](https://sqlancer.github.io/posts/), which, as of now, contains only posts by contributors of the [Google Summer of Code project](https://summerofcode.withgoogle.com/archive/2023/organizations/sqlancer).
-# Getting Started
+# Getting Started [[Video Guide]](https://www.youtube.com/watch?v=lcZ6LixPH1Y)
-Requirements:
+Minimum Requirements:
* Java 11 or above
-* [Maven](https://maven.apache.org/) (`sudo apt install maven` on Ubuntu)
-* The DBMS that you want to test (embedded DBMSs such as DuckDB, H2, and SQLite do not require a setup)
+* [Maven](https://maven.apache.org/)
-The following commands clone SQLancer, create a JAR, and start SQLancer to test SQLite using Non-optimizing Reference Engine Construction (NoREC):
+The following commands clone SQLancer, create a JAR, and start SQLancer to test SQLite using [Non-optimizing Reference Engine Construction (NoREC)](https://arxiv.org/abs/2007.08292):
```
git clone https://github.com/sqlancer/sqlancer
@@ -29,74 +27,90 @@ cd target
java -jar sqlancer-*.jar --num-threads 4 sqlite3 --oracle NoREC
```
-If the execution prints progress information every five seconds, then the tool works as expected. Note that SQLancer might find bugs in SQLite. Before reporting these, be sure to check that they can still be reproduced when using the latest development version. The shortcut CTRL+C can be used to terminate SQLancer manually. If SQLancer does not find any bugs, it executes infinitely. The option `--num-tries` can be used to control after how many bugs SQLancer terminates. Alternatively, the option `--timeout-seconds` can be used to specify the maximum duration that SQLancer is allowed to run.
+**Running and terminating.** If the execution prints progress information every five seconds, then the tool works as expected. The shortcut CTRL+C can be used to terminate SQLancer manually. If SQLancer does not find any bugs, it executes infinitely. The option `--num-tries` can be used to control after how many bugs SQLancer terminates. Alternatively, the option `--timeout-seconds` can be used to specify the maximum duration that SQLancer is allowed to run.
-If you launch SQLancer without parameters, available options and commands are displayed. Note that general options that are supported by all DBMS-testing implementations (e.g., `--num-threads`) need to precede the name of DBMS to be tested (e.g., `sqlite3`). Options that are supported only for specific DBMS (e.g., `--test-rtree` for SQLite3), or options for which each testing implementation provides different values (e.g. `--oracle NoREC`) need to go after the DBMS name.
+**Parameters.** If you launch SQLancer without parameters, available options and commands are displayed. Note that general options that are supported by all DBMS-testing implementations (e.g., `--num-threads`) need to precede the name of the DBMS to be tested (e.g., `sqlite3`). Options that are supported only for specific DBMS (e.g., `--test-rtree` for SQLite3), or options for which each testing implementation provides different values (e.g. `--oracle NoREC`) need to go after the DBMS name.
-# Testing Approaches
+**DBMSs.** To run SQLancer on SQLite, it was not necessary to install and set up a DBMS. The reason for this is that embedded DBMSs run in the same process as the application and thus require no separate installation or setup. Embedded DBMSs supported by SQLancer include DuckDB, H2, and SQLite. Their binaries are included as [JAR dependencies](https://github.com/sqlancer/sqlancer/blob/main/pom.xml). Note that any crashes in these systems will also cause a crash in the JVM on which SQLancer runs.
-| Approach | Description |
-|------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| Pivoted Query Synthesis (PQS) | PQS is the first technique that we designed and implemented. It randomly selects a row, called a pivot row, for which a query is generated that is guaranteed to fetch the row. If the row is not contained in the result set, a bug has been detected. It is fully described [here](https://arxiv.org/abs/2001.04174). PQS is the most powerful technique, but also requires more implementation effort than the other two techniques. It is currently unmaintained. |
-| Non-optimizing Reference Engine Construction (NoREC) | NoREC aims to find optimization bugs. It is described [here](https://www.manuelrigger.at/preprints/NoREC.pdf). It translates a query that is potentially optimized by the DBMS to one for which hardly any optimizations are applicable, and compares the two result sets. A mismatch between the result sets indicates a bug in the DBMS. |
-| Ternary Logic Partitioning (TLP) | TLP partitions a query into three partitioning queries, whose results are composed and compare to the original query's result set. A mismatch in the result sets indicates a bug in the DBMS. In contrast to NoREC and PQS, it can detect bugs in advanced features such as aggregate functions. |
-Please find the `.bib` entries [here](docs/PAPERS.md).
+# Using SQLancer
-# Supported DBMS
+**Logs.** SQLancer stores logs in the `target/logs` subdirectory. By default, the option `--log-each-select` is enabled, which results in every SQL statement that is sent to the DBMS being logged. The corresponding file names are postfixed with `-cur.log`. In addition, if SQLancer detects a logic bug, it creates a file with the extension `.log`, in which the statements to reproduce the bug are logged, including only the last query that was executed along with the other statements to set up the database state.
-Since SQL dialects differ widely, each DBMS to be tested requires a separate implementation.
+**Reducing bugs.** After finding a bug-inducing test input, the input typically needs to be reduced to be further analyzed, as it might contain many SQL statements that are redundant to reproduce the bug. One option is to do this manually, by removing a statement or feature at a time, replaying the bug-inducing statements, and applying the test oracle (e.g., for test oracles like TLP or NoREC, this would require checking that both queries still produce a different result). This process can be automated using a so-called [delta-debugging approach](https://www.debuggingbook.org/html/DeltaDebugger.html). SQLancer includes an experimental implementation of a delta debugging approach, which can be enabled using `--use-reducer`. In the past, we have successfully used [C-Reduce](https://embed.cs.utah.edu/creduce/), which requires specifying the test oracle in a script that can be executed by C-Reduce.
-| DBMS | Status | Expression Generation | Description |
-|-------------|-------------|-----------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| SQLite | Working | Untyped | This implementation is currently affected by a significant performance regression that still needs to be investigated |
-| MySQL | Working | Untyped | Running this implementation likely uncovers additional, unreported bugs. |
-| PostgreSQL | Working | Typed | |
-| Citus (PostgreSQL Extension) | Working | Typed | This implementation extends the PostgreSQL implementation of SQLancer, and was contributed by the Citus team. |
-| MariaDB | Preliminary | Untyped | The implementation of this DBMS is very preliminary, since we stopped extending it after all but one of our bug reports were addressed. Running it likely uncovers additional, unreported bugs. |
-| CockroachDB | Working | Typed | |
-| TiDB | Working | Untyped | |
-| DuckDB | Working | Untyped, Generic | |
-| ClickHouse | Preliminary | Untyped, Generic | Implementing the different table engines was not convenient, which is why only a very preliminary implementation exists. |
-| TDEngine | Removed | Untyped | We removed the TDEngine implementation since all but one of our bug reports were still unaddressed five months after we reported them. |
-| OceanBase | Working | Untyped | |
+**Testing the latest DBMS version.** For most DBMSs, SQLancer supports only a previous *release* version. Thus, potential bugs that SQLancer finds could be already fixed in the latest *development* version of the DBMS. If you are not a developer of the DBMS that you are testing, we would like to encourage you to validate that the bug can still be reproduced before reporting it. We would appreciate it if you could mention SQLancer when you report bugs found by it. We would also be excited to hear about your experience using SQLancer or related use cases or extensions.
+**Options.** SQLancer provides many options that you can use to customize its behavior. Executing `java -jar sqlancer-*.jar --help` will list them and should print output such as the following:
+```
+Usage: SQLancer [options] [command] [command options]
+ Options:
+ --ast-reducer-max-steps
+ EXPERIMENTAL Maximum steps the AST-based reducer will do
+ Default: -1
+ --ast-reducer-max-time
+ EXPERIMENTAL Maximum time duration (secs) the statement reducer will do
+ Default: -1
+ --canonicalize-sql-strings
+ Should canonicalize query string (add ';' at the end
+ Default: true
+ --constant-cache-size
+ Specifies the size of the constant cache. This option only takes effect
+ when constant caching is enabled
+ Default: 100
+...
+```
-# Using SQLancer
+**Which SQLancer version to use.** The recommended way to use SQLancer is to use its latest source version on GitHub. Infrequent and irregular official releases are also available on the following platforms:
+* [GitHub](https://github.com/sqlancer/sqlancer/releases)
+* [Maven Central](https://search.maven.org/artifact/com.sqlancer/sqlancer)
+* [DockerHub](https://hub.docker.com/r/mrigger/sqlancer)
+
+**Understanding SQL generation.** To analyze bug-inducing statements, it is helpful to understand the characteristics of SQLancer. First, SQLancer is expected to always generate SQL statements that are syntactically valid for the DBMS under test. Thus, you should never observe any syntax errors. Second, SQLancer might generate statements that are semantically invalid. For example, SQLancer might attempt to insert duplicate values into a column with a `UNIQUE` constraint, as completely avoiding such semantic errors is challenging. Third, any bug reported by SQLancer is expected to be a real bug, except those reported by CERT (as performance issues are not as clearly defined as other kinds of bugs). If you observe any bugs indicated by SQLancer that you do not consider bugs, something is likely wrong with your setup. Finally, related to the aforementioned point, SQLancer is specific to a version of the DBMS, and you can find the version against which we test in our [GitHub Actions workflow](https://github.com/sqlancer/sqlancer/blob/documentation/.github/workflows/main.yml). If you are testing against another version, you might observe various false alarms (e.g., caused by syntax errors). While we would always like for SQLancer to be up-to-date with the latest development version of each DBMS, we lack the resources to achieve this.
-## Logs
+**Supported DBMSs.** SQLancer requires DBMS-specific code for each DBMS that it supports. As of January 2025, it provides support for Citus, ClickHouse, CnosDB, CockroachDB, Databend, (Apache) DataFusion, (Apache) Doris, DuckDB, H2, HSQLDB, MariaDB, Materialize, MySQL, OceanBase, PostgreSQL, Presto, QuestDB, SQLite3, TiDB, and YugabyteDB. The extent to which the individual DBMSs are supported [differs](https://github.com/sqlancer/sqlancer/blob/documentation-approaches/CONTRIBUTING.md).
-SQLancer stores logs in the `target/logs` subdirectory. By default, the option `--log-each-select` is enabled, which results in every SQL statement that is sent to the DBMS being logged. The corresponding file names are postfixed with `-cur.log`. In addition, if SQLancer detects a logic bug, it creates a file with the extension `.log`, in which the statements to reproduce the bug are logged.
+# Approaches and Papers
-## Reducing a Bug
+SQLancer has pioneered and includes multiple approaches for DBMS testing, as outlined below in chronological order.
-After finding a bug, it is useful to produce a minimal test case before reporting the bug, to save the DBMS developers' time and effort. For many test cases, [C-Reduce](https://embed.cs.utah.edu/creduce/) does a great job. In addition, we have been working on a SQL-specific reducer, which we plan to release soon.
+| Technique | Venue | Links | Description |
+|-----------------------------------------------------------------|---------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| Pivoted Query Synthesis (PQS) | OSDI 2020 | [Paper](https://www.usenix.org/system/files/osdi20-rigger.pdf) [Video](https://www.youtube.com/watch?v=0aeDyXgzo04) | PQS is the first technique that we designed and implemented. It randomly selects a row, called a pivot row, for which a query is generated that is guaranteed to fetch the row. If the row is not contained in the result set, a bug has been detected. It is fully described in the paper. PQS effectively detects bugs, but requires more implementation effort than other testing approaches that follow a metamorphic testing or differential testing methodology. Thus, it is currently unmaintained. |
+| Non-optimizing Reference Engine Construction (NoREC) | ESEC/FSE 2020 | [Paper](https://arxiv.org/abs/2007.08292) [Video](https://www.youtube.com/watch?v=4mbzytrWJhQ) | NoREC aims to find optimization bugs. It translates a query that is potentially optimized by the DBMS to one for which hardly any optimizations are applicable, and compares the two result sets. A mismatch between the result sets indicates a bug in the DBMS. The approach applies primarily to simple queries with a filter predicate. |
+| Ternary Logic Partitioning (TLP) | OOPSLA 2020 | [Paper](https://dl.acm.org/doi/pdf/10.1145/3428279) [Video](https://www.youtube.com/watch?v=FN9OLbGh0VI) | TLP partitions a query into three partitioning queries, whose results are composed and compared to the original query's result set. A mismatch in the result sets indicates a bug in the DBMS. In contrast to NoREC and PQS, it can detect bugs in advanced features such as aggregate functions. It is among the most widely adopted testing techniques. |
+| Differential Query Execution (DQE) | ICSE 2023 | [Paper](https://ieeexplore.ieee.org/document/10172736) [Code](https://github.com/sqlancer/sqlancer/pull/1251) | Differential Query Execution (DQE) is a novel and general approach to detect logic bugs in SELECT, UPDATE and DELETE queries. DQE solves the test oracle problem by executing SELECT, UPDATE and DELETE queries with the same predicate φ, and observing inconsistencies among their execution results. For example, if a row that is updated by an UPDATE query with a predicate φ does not appear in the query result of a SELECT query with the same predicate φ, a logic bug is detected in the target DBMS. We append two extra columns to each table in a database to uniquely identify each row and track whether a row has been modified. We further rewrite SELECT and UPDATE queries to identify their accessed rows. DQE supports MySQL. |
+| Query Plan Guidance (QPG) | ICSE 2023 | [Paper](https://arxiv.org/pdf/2312.17510) [Video](https://youtu.be/6EjQ1cKiZJU?si=gh7uoykRqNjl3GXR&t=1820) [Code](https://github.com/sqlancer/sqlancer/issues/641) | QPG is a feedback-guided test case generation approach. It is based on the insights that query plans capture whether interesting behavior is exercised within the DBMS. It works by mutating the database state when no new query plans have been observed after executing a number of queries, expecting that the new state enables new query plans to be triggered. This approach is enabled by option `--qpg-enable` and supports TLP and NoREC oracles for SQLite, CockroachDB, TiDB, and Materialize. It is the only approach that specifically tackles the test input generation problem. |
+| Cardinality Estimation Restriction Testing (CERT) | ICSE 2024 | [Paper](https://arxiv.org/pdf/2306.00355) [Code](https://github.com/sqlancer/sqlancer/issues/822) | CERT aims to find performance issues through unexpected estimated cardinalities, which represent the estimated number of returned rows. From a given input query, it derives a more restrictive query, whose estimated cardinality should be no more than that of the original query. A violation indicates a potential performance issue. CERT supports TiDB, CockroachDB, and MySQL. CERT is the only test oracle that is part of SQLancer that was designed to find performance issues. |
+| Differential Query Plans (DQP) | SIGMOD 2024 | [Paper](https://dl.acm.org/doi/pdf/10.1145/3654991) [Video](https://www.youtube.com/watch?v=9Qp7quJfGEk) [Code](https://github.com/sqlancer/sqlancer/issues/918) | DQP aims to find logic bugs by controlling the execution of different query plans for a given query and validating that they produce a consistent result. DQP supports MySQL, MariaDB, and TiDB. |
+| Constant Optimization Driven Database System Testing (CODDTest) | SIGMOD 2025 | [Code](https://github.com/sqlancer/sqlancer/pull/1054) | CODDTest finds logic bugs in DBMSs, including in advanced features such as subqueries. It is based on the insight that we can assume the database state to be constant for a database session, which then enables us to substitute parts of a query with their results, essentially corresponding to constant folding and constant propagation, which are two traditional compiler optimizations. |
-## Found Bugs
+Please find the `.bib` entries [here](docs/PAPERS.md).
-We would appreciate it if you mention SQLancer when you report bugs found by it. We would also be excited to know if you are using SQLancer to find bugs, or if you have extended it to test another DBMS (also if you do not plan to contribute it to this project). SQLancer has found over 400 bugs in widely-used DBMS, which are listed [here](https://www.manuelrigger.at/dbms-bugs/).
+# FAQ
+**I am running SQLancer on the latest version of a supported DBMS. Is it expected that SQLancer prints many AssertionErrors?** In many cases, SQLancer does not support the latest version of a DBMS. You can check the [`.github/workflows/main.yml`](https://github.com/sqlancer/sqlancer/blob/master/.github/workflows/main.yml) file to determine which version we use in our CI tests, which corresponds to the currently supported version of that DBMS. SQLancer should print only an `AssertionError` and produce a corresponding log file, if it has identified a bug. To upgrade SQLancer to support a new DBMS version, two options are advisable: (1) the generators can be updated to no longer generate certain patterns that might cause errors (e.g., which might be the case if a keyword or option is no longer supported) or (2) the newly-appearing errors can be added as [expected errors](https://github.com/sqlancer/sqlancer/blob/354d591cfcd37fa1de85ec77ec933d5d975e947a/src/sqlancer/common/query/ExpectedErrors.java) so that SQLancer ignores them when they appear (e.g., this is useful if some error-inducing patterns cannot easily be avoided).
-# Community
+Another reason for many failures on a supported version could be that error messages are printed in a non-English locale (which would then be visible in the stack trace). In such a case, try setting the DBMS' locale to English (e.g., see the [PostgreSQL homepage](https://www.postgresql.org/docs/current/locale.html)).
-We have created a [Slack workspace](https://join.slack.com/t/sqlancer/shared_invite/zt-eozrcao4-ieG29w1LNaBDMF7OB_~ACg) to discuss SQLancer, and DBMS testing in general. SQLancer's official Twitter handle is [@sqlancer_dbms](https://twitter.com/sqlancer_dbms).
+**When starting SQLancer, I get an error such as "database 'test' does not exist". How can I run SQLancer without this error?** For some DBMSs, SQLancer expects that a database "test" exists, which it then uses as an initial database to connect to. If you have not yet created such a database, you can use a command such as `CREATE DATABASE test` to create this database (e.g., see the [PostgreSQL documentation](https://www.postgresql.org/docs/current/sql-createdatabase.html)).
+# Links
-# Additional Documentation
+Documentation and resources:
* [Contributing to SQLancer](CONTRIBUTING.md)
* [Papers and .bib entries](docs/PAPERS.md)
+* More information on our DBMS testing efforts and the bugs we found is available [here](https://www.manuelrigger.at/dbms-bugs/).
-# Releases
-
-Official release are available on:
-* [GitHub](https://github.com/sqlancer/sqlancer/releases)
-* [Maven Central](https://search.maven.org/artifact/com.sqlancer/sqlancer)
-* [DockerHub](https://hub.docker.com/r/mrigger/sqlancer)
-
-# Additional Resources
+Videos:
+* [SQLancer Tutorial Playlist](https://www.youtube.com/playlist?list=PLm7ofmclym1E2LwBeSer_AAhzBSxBYDci)
+* [SQLancer Talks](https://youtube.com/playlist?list=PLm7ofmclym1E9-AbYy-PkrMfHpB9VdlZJ)
-* A talk on Ternary Logic Partitioning (TLP) and SQLancer is available on [YouTube](https://www.youtube.com/watch?v=Np46NQ6lqP8).
-* An (older) Pivoted Query Synthesis (PQS) talk is available on [YouTube](https://www.youtube.com/watch?v=yzENTaWe7qg).
-* PingCAP has implemented PQS, NoREC, and TLP in a tool called [go-sqlancer](https://github.com/chaos-mesh/go-sqlancer).
-* More information on our DBMS testing efforts and the bugs we found is available [here](https://www.manuelrigger.at/dbms-bugs/).
+Closely related tools:
+* [go-sqlancer](https://github.com/chaos-mesh/go-sqlancer): re-implementation of some of SQLancer's approaches in Go by PingCAP
+* [Jepsen](https://github.com/jepsen-io): testing of distributed (database) systems
+* [SQLRight](https://github.com/PSU-Security-Universe/sqlright): coverage-guided DBMS fuzzer, also supporting NoREC and TLP
+* [SQLsmith](https://github.com/anse1/sqlsmith): random SQL query generator used for fuzzing
+* [Squirrel](https://github.com/s3team/Squirrel): coverage-guided DBMS fuzzer
diff --git a/configs/spotbugs-exclude.xml b/configs/spotbugs-exclude.xml
index 366e8ac55..7fa4de560 100644
--- a/configs/spotbugs-exclude.xml
+++ b/configs/spotbugs-exclude.xml
@@ -7,9 +7,19 @@
+
+
+
+
+
-
+
+
+
+
+
+
diff --git a/docs/PAPERS.md b/docs/PAPERS.md
index ca9d40de7..a42b42c12 100644
--- a/docs/PAPERS.md
+++ b/docs/PAPERS.md
@@ -1,6 +1,6 @@
# Papers
-The testing approaches implemented in SQLancer are described in the three papers below.
+The testing approaches implemented in SQLancer are described in the papers below.
## Testing Database Engines via Pivoted Query Synthesis
@@ -51,6 +51,51 @@ This paper describes TLP, a metamorphic testing approach that can detect various
}
```
+## Testing Database Engines via Query Plan Guidance
+
+This paper describes Query Plan Guidance (QPG), a test case generation method guided by query plan coverage. This method can be paired with the above three testing methods. A preprint is available [here](http://bajinsheng.github.io/assets/pdf/qpg_icse23.pdf).
+
+```
+@inproceedings{Ba2023QPG,
+ author = {Ba, Jinsheng and Rigger, Manuel},
+ title = {Testing Database Engines via Query Plan Guidance},
+ booktitle = {The 45th International Conference on Software Engineering (ICSE'23)},
+ year = {2023},
+ month = may
+}
+```
+
+## CERT: Finding Performance Issues in Database Systems Through the Lens of Cardinality Estimation
+
+This paper describes CERT, a testing approach to find performance issues by inspecting inconsistent estimated cardinalities. A preprint is available [here](https://bajinsheng.github.io/assets/pdf/cert_icse24.pdf).
+
+```
+@inproceedings{cert,
+ author = {Ba, Jinsheng and Rigger, Manuel},
+ title = {CERT: Finding Performance Issues in Database Systems Through the Lens of Cardinality Estimation},
+ booktitle = {The 46th International Conference on Software Engineering (ICSE'24)},
+ year = {2024},
+ month = apr,
+}
+```
+
+## Keep It Simple: Testing Databases via Differential Query Plans
+
+This paper describes DQP, a testing approach to find logic bugs in database systems by comparing the query plans of different database systems. A preprint is available [here](https://bajinsheng.github.io/assets/pdf/dqp_sigmod24.pdf).
+
+```
+@article{dqp,
+ author = {Ba, Jinsheng and Rigger, Manuel},
+ title = {Keep It Simple: Testing Databases via Differential Query Plans},
+ year = {2024},
+ issue_date = {June 2024},
+ publisher = {Association for Computing Machinery},
+ address = {New York, NY, USA},
+ journal = {Proceedings of the ACM on Management of Data (SIGMOD'24)},
+ month = jun
+}
+```
+
# Comparing SQLancer With Other Tools that Find Logic Bugs
If you want to fairly compare other tools with SQLancer, we would be glad to provide feedback (e.g., feel free to send an email to manuel.rigger@inf.ethz.ch). We have the following general recommendations and comments:
diff --git a/docs/QueryPlanGuidance.md b/docs/QueryPlanGuidance.md
new file mode 100644
index 000000000..bb467461b
--- /dev/null
+++ b/docs/QueryPlanGuidance.md
@@ -0,0 +1,66 @@
+# Query Plan Guidance
+Query Plan Guidance (QPG) is a test case generation method that attempts to explore unseen query plans. Given a database state, we mutate it after no new unique query plans have been observed by randomly-generated queries on that state, aiming to cover more unique query plans and thereby exercise more logic of the DBMS. Here, we document all mutators, from which we choose the most promising one that may help cover more unique query plans.
+
+# Mutators
+All mutators are listed below and implemented in the enumeration variables `Action` in the `XXDBProvider.java` file of each DBMS.
+The `Mutator` column includes the items in the `Action` enumeration variable.
+The `Example` column includes an example of a realistic statement generated by this mutator.
+The `Description` column includes an explanation of what the mutator does.
+The `More unique query plans...` column explains why applying this mutator may help cover more unique query plans.
+
+
+|DBMS |Mutator |Example |Description |More unique query plans may be covered because of |
+|-----------|---------------------|--------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------|--------------------------------------------------------|
+|SQLite |PRAGMA |PRAGMA automatic_index true; |It modifies database options. |different options that decide how to execute statements.|
+|SQLite |CREATE_INDEX |CREATE INDEX i0 ON t0 WHERE c0 ISNULL; |It adds a new index on a table. |subsequent different logic of querying data. |
+|SQLite |CREATE_VIEW |CREATE VIEW v0(c0) AS SELECT DISTINCT ABS(t0.c2) FROM t0; |It adds a new view from existing tables. |more possible execution logics on the view. |
+|SQLite |CREATE_TABLE |CREATE TABLE t0 (c0 INT CHECK ((c0) BETWEEN (1) AND (10)) ); |It adds a new table. |more possible execution logics on the table. |
+|SQLite |CREATE_VIRTUALTABLE |CREATE VIRTUAL TABLE vt1 USING fts5(c0 UNINDEXED); |It adds a new table with fts5 feature. |more possible execution logics on the table with fts5. |
+|SQLite |CREATE_RTREETABLE |CREATE VIRTUAL TABLE rt0 USING rtree_i32(c0, c1, c2, c3, c4); |It adds a new table with rtree feature. |more possible execution logics on the table with rtree. |
+|SQLite |INSERT |INSERT INTO t0(c0, c1) VALUES ('lrd+a*', NULL); |It inserts a new row to a table. |subsequent different logic of querying data. |
+|SQLite |DELETE |DELETE FROM t0 WHERE (c0>3); |It deletes specific rows from a table. |subsequent different logic of querying data. |
+|SQLite |ALTER |ALTER TABLE t0 ADD COLUMN c39 REAL; |It changes the schema of a table. |more possible execution logics on the changed table. |
+|SQLite |UPDATE |UPDATE t0 SET (c2, c0)=(-944, 'L((xA') WHERE t0.c1; |It updates specific data of a table. |subsequent different logic of querying data. |
+|SQLite |DROP_INDEX |DROP INDEX i0; |It drops an index. |subsequent different logic of querying data. |
+|SQLite |DROP_TABLE |DROP TABLE t0; |It drops a table. |subsequent different logic of querying data. |
+|SQLite |DROP_VIEW |DROP VIEW v0; |It drops a view. |subsequent different logic of querying data. |
+|SQLite |VACUUM |VACUUM main; |It rebuilds the database file. |subsequent different logic of querying data. |
+|SQLite |REINDEX |REINDEX t0; |It drops and recreates indexes from scratch. |subsequent different logic of querying data. |
+|SQLite |ANALYZE |ANALYZE t0; |It gathers statistics about tables to help make better query planning choices.|subsequent different logic of querying data. |
+|SQLite |EXPLAIN |EXPLAIN SELECT * FROM t0; |It obtains query plan of a query. |subsequent different logic of querying data. |
+|SQLite |CHECK_RTREE_TABLE |SELECT rtreecheck('rt0'); |It runs an integrity check on a table. |subsequent different logic of querying data. |
+|SQLite |VIRTUAL_TABLE_ACTION |INSERT INTO vt0(vt0) VALUES('rebuild'); |It changes the options of a virtual table. |subsequent different logic of querying data. |
+|SQLite |MANIPULATE_STAT_TABLE|INSERT INTO sqlite_stat1 VALUES('rt0', 't1', '2'); |It changes the table that stores statistics of all tables. |subsequent different logic of querying data. |
+|SQLite |TRANSACTION_START |BEGIN TRANSACTION; |All statements after this will not be committed. |subsequent different logic of querying data. |
+|SQLite |ROLLBACK_TRANSACTION |ROLLBACK TRANSACTION; |All statements after last BEGIN are dropped. |subsequent different logic of querying data. |
+|SQLite |COMMIT |COMMIT; |All statements after last BEGIN are committed |subsequent different logic of querying data. |
+|TiDB |CREATE_TABLE |CREATE TABLE t1(c0 INT); |It adds a new table. |more possible execution logics on the table. |
+|TiDB |CREATE_INDEX |CREATE INDEX i0 ON t0(c0(250) ASC) KEY_BLOCK_SIZE 1564693810209727437; |It adds a new index on a table. |subsequent different logic of querying data. |
+|TiDB |VIEW_GENERATOR |CREATE VIEW v0(c0, c1) AS SELECT t1.c0, ((t1.c0)REGEXP('8')) FROM t1; |It adds a new view from existing tables. |more possible execution logics on the view. |
+|TiDB |INSERT |INSERT INTO t0(c0) VALUES (-16387); |It inserts a new row to a table. |subsequent different logic of querying data. |
+|TiDB |ALTER_TABLE |ALTER TABLE t1 ADD PRIMARY KEY(c0); |It changes the schema of a table. |more possible execution logics on the changed table. |
+|TiDB |TRUNCATE |TRUNCATE t0; |It drops all rows of a table. |subsequent different logic of querying data. |
+|TiDB |UPDATE |UPDATE t0 SET c0='S' WHERE t0.c0; |It updates specific data of a table. |subsequent different logic of querying data. |
+|TiDB |DELETE |DELETE FROM t0 ORDER BY CAST(t0.c0 AS CHAR) DESC; |It deletes specific rows from a table. |subsequent different logic of querying data. |
+|TiDB |SET |set @@tidb_max_chunk_size=8864; |It modifies database options. |different options that decide how to execute statements.|
+|TiDB |ADMIN_CHECKSUM_TABLE |ADMIN CHECKSUM TABLE t0; |It calculates the checksum for a table. |subsequent different logic of querying data. |
+|TiDB |ANALYZE_TABLE |ANALYZE TABLE t1 WITH 174 BUCKETS; |It gathers statistics about tables to help make better query planning choices.|subsequent different logic of querying data. |
+|TiDB |DROP_TABLE |DROP TABLE t0; |It drops a table. |subsequent different logic of querying data. |
+|TiDB |DROP_VIEW |DROP VIEW v0; |It drops a view. |subsequent different logic of querying data. |
+|CockroachDB|CREATE_TABLE |CREATE TABLE t1 (c0 INT4, c1 VARBIT(44) UNIQUE DEFAULT (B'000'), CONSTRAINT "primary" PRIMARY KEY(c1 ASC, c0 ASC));|It adds a new table. |more possible execution logics on the table. |
+|CockroachDB|CREATE_INDEX |CREATE INDEX ON t0(rowid); |It adds a new index on a table. |subsequent different logic of querying data. |
+|CockroachDB|CREATE_VIEW |CREATE VIEW v0(c0) AS SELECT DISTINCT MIN(TIMETZ '1970-01-11T12:19:44') FROM t0; |It adds a new view from existing tables. |more possible execution logics on the view. |
+|CockroachDB|CREATE_STATISTICS |CREATE STATISTICS s0 FROM t2; |It gathers statistics about tables to help make better query planning choices.|subsequent different logic of querying data. |
+|CockroachDB|INSERT |INSERT INTO t1 (rowid, c0) VALUES(NULL, true) ON CONFLICT (c0) DO NOTHING ; |It inserts a new row to a table. |subsequent different logic of querying data. |
+|CockroachDB|UPDATE |UPDATE t0@{FORCE_INDEX=t0_pkey} SET c0=t0.c0; |It updates specific data of a table. |subsequent different logic of querying data. |
+|CockroachDB|SET_SESSION |SET SESSION BYTEA_OUTPUT=escape; |It changes session configurations. |different options that decide how to execute statements.|
+|CockroachDB|SET_CLUSTER_SETTING |SET CLUSTER SETTING sql.query_cache.enabled=true; |It changes cluster configurations. |different options that decide how to execute statements.|
+|CockroachDB|DELETE |DELETE from t0; |It deletes specific rows from a table. |subsequent different logic of querying data. |
+|CockroachDB|TRUNCATE |TRUNCATE TABLE t1 CASCADE; |It drops all rows of a table. |subsequent different logic of querying data. |
+|CockroachDB|DROP_TABLE |DROP TABLE t0; |It drops a table. |subsequent different logic of querying data. |
+|CockroachDB|DROP_VIEW |DROP VIEW v0; |It drops a view. |subsequent different logic of querying data. |
+|CockroachDB|COMMENT_ON |COMMENT ON INDEX t0_c0_key IS '|?'; |It changes schema of a table. |subsequent different logic of querying data. |
+|CockroachDB|SHOW |SHOW LOCALITY; |It lists detailed information of active queries. |subsequent different logic of querying data. |
+|CockroachDB|EXPLAIN |EXPLAIN SELECT * FROM t0; |It obtains query plan of a query. |subsequent different logic of querying data. |
+|CockroachDB|SCRUB |EXPERIMENTAL SCRUB table t0; |It checks data corruption of a table. |subsequent different logic of querying data. |
+|CockroachDB|SPLIT |ALTER INDEX t0@t0_c0_key SPLIT AT VALUES (NULL); |It changes the indexes. |subsequent different logic of querying data. |
diff --git a/docs/testCaseReduction.md b/docs/testCaseReduction.md
new file mode 100644
index 000000000..ee317f791
--- /dev/null
+++ b/docs/testCaseReduction.md
@@ -0,0 +1,50 @@
+# Test Case Reduction
+SQLancer generates a large number of statements, but not all of them are relevant to the bug. To automatically reduce the test cases, two reducers were implemented: the statement reducer and the AST-based reducer.
+
+## Statement Reducer
+The statement reducer utilizes the delta-debugging technique to remove irrelevant statements. More details on delta-debugging can be found in this paper: [Simplifying and Isolating Failure-Inducing Input](https://www.cs.purdue.edu/homes/xyzhang/fall07/Papers/delta-debugging.pdf).
+
+Using the statement reducer, SQLancer reduces the set of statements to a minimal subset that reproduces the bug.
+
+## AST-Based Reducer
+The AST-based reducer can shorten a statement by applying AST-level transformations, such as removing unnecessary clauses, removing irrelevant elements in a list, and simplifying complicated expressions.
+
+The transformations are implemented with [JSQLParser](https://github.com/JSQLParser/JSqlParser), an RDBMS-agnostic SQL statement parser that can translate SQL statements into a traversable hierarchy of Java classes. JSQLParser provides support for the SQL standard as well as major SQL dialects. The AST-based reducer works for any SQL dialect that can be parsed by this tool.
+
+## Implementing reproducer
+Determining whether a bug persists after reducing statements
+is an undecidable task for general transformations.
+In practice, reducers use the [reproducer](../src/sqlancer/Reproducer.java) to determine
+if a bug remains after statements have been removed or modified.
+The reproducer's responsibility is to verify if the current state,
+formed by the pared-down statements,
+continues to yield incorrect results for specific queries.
+
+Different oracles have distinct logic for determination,
+meaning a universal reproducer doesn't exist.
+Each oracle type needs its own reproducer implementation.
+If a reproducer is not implemented for a specific oracle,
+test case reduction is not available when using that oracle.
+
+Oracles for which reproducers have currently been implemented include:
+1. for [`SQLite3NoRECOracle`](../src/sqlancer/sqlite3/oracle/SQLite3NoRECOracle.java)
+2. for [`TiDBTLPWhereOracle`](../src/sqlancer/tidb/oracle/TiDBTLPWhereOracle.java)
+
+## Using reducers
+Test-case reduction is disabled by default. The statement reducer can be enabled by passing `--use-reducer` when starting SQLancer. If you wish to further shorten each statement, you need to additionally pass the `--reduce-ast` parameter so that the AST-based reduction is applied.
+
+Note: if `--reduce-ast` is set, `--use-reducer` option must be enabled first.
+
+There are also options to define timeout seconds and max steps of reduction for both statement reducer and AST-based reducer.
+
+```
+--statement-reducer-max-steps=
+--statement-reducer-max-time=
+--ast-reducer-max-steps=
+--ast-reducer-max-time=
+```
+
+## Reduction logs
+If test-case reduction is enabled, each time the reducer performs a reduction step successfully, it prints the reduced statements to the log file, overwriting the previous ones.
+
+The log files will be stored in the following format: `logs/<DBMS>/reduce/<database>-reduce.log`. For instance, if the tested DBMS is SQLite3 and the current database is named database0, the log file will be located at `logs/sqlite3/reduce/database0-reduce.log`.
diff --git a/pom.xml b/pom.xml
index 234d805f1..7c9a1106b 100644
--- a/pom.xml
+++ b/pom.xml
@@ -44,7 +44,7 @@
org.apache.maven.plugins
maven-shade-plugin
- 3.3.0
+ 3.4.0
package
@@ -89,7 +89,7 @@
org.jacoco
jacoco-maven-plugin
- 0.8.8
+ 0.8.12
@@ -123,7 +123,7 @@
org.codehaus.plexus
plexus-compiler-eclipse
- 2.12.1
+ 2.13.0
org.eclipse.jdt
@@ -133,7 +133,7 @@
org.codehaus.plexus
plexus-compiler-api
- 2.12.1
+ 2.13.0
@@ -154,7 +154,7 @@
org.apache.maven.plugins
maven-dependency-plugin
- 3.2.0
+ 3.4.0
copy-dependencies
@@ -175,7 +175,7 @@
org.apache.maven.plugins
maven-jar-plugin
- 3.2.2
+ 3.3.0
true
@@ -209,7 +209,7 @@
com.puppycrawl.tools
checkstyle
- 10.3.3
+ 10.5.0
@@ -252,7 +252,7 @@
com.github.spotbugs
spotbugs-maven-plugin
- 4.7.1.1
+ 4.7.3.0
spotbugs
@@ -284,12 +284,22 @@
org.postgresql
postgresql
- 42.5.0
+ 42.5.1
+
+
+ com.ing.data
+ cassandra-jdbc-wrapper
+ 4.7.0
+
+
+ com.yugabyte
+ jdbc-yugabytedb
+ 42.3.5-yb-1
org.xerial
sqlite-jdbc
- 3.36.0.3
+ 3.49.1.0
mysql
@@ -299,23 +309,28 @@
org.mariadb.jdbc
mariadb-java-client
- 3.0.7
+ 3.1.0
org.duckdb
duckdb_jdbc
- 0.4.0
+ 1.3.0.0
+
+
+ com.facebook.presto
+ presto-jdbc
+ 0.283
org.junit.jupiter
junit-jupiter-engine
- 5.9.0
+ 5.11.2
test
org.slf4j
- slf4j-simple
- 1.7.36
+ slf4j-simple
+ 2.0.6
ru.yandex.clickhouse
@@ -325,7 +340,7 @@
com.h2database
h2
- 2.1.214
+ 2.3.232
org.mongodb
@@ -337,6 +352,85 @@
arangodb-java-driver
6.9.0
+
+ org.questdb
+ questdb
+ 6.5.3
+
+
+ org.hsqldb
+ hsqldb
+ 2.7.4
+ runtime
+
+
+ org.apache.commons
+ commons-csv
+ 1.9.0
+
+
+ com.github.jsqlparser
+ jsqlparser
+ 4.6
+
+
+ org.apache.arrow
+ flight-sql-jdbc-driver
+ 16.1.0
+
+
+ org.apache.hive
+ hive-jdbc
+ 3.1.2
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+
+
+ org.apache.hive
+ hive-serde
+ 4.0.1
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+
+
+ org.apache.hive
+ hive-cli
+ 4.0.1
+
+
+ org.apache.logging.log4j
+ log4j-slf4j-impl
+
+
+
+
+ org.apache.logging.log4j
+ log4j-api
+ 2.24.3
+
+
+ org.apache.logging.log4j
+ log4j-core
+ 2.24.3
+
+
+ org.apache.logging.log4j
+ log4j-slf4j2-impl
+ 2.24.3
+
+
+ org.apache.hadoop
+ hadoop-common
+ 3.2.4
+
@@ -435,5 +529,23 @@
+
+ datafusion-tests
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+ 3.3.0
+
+
+ **/TestDataFusion.java
+
+ --add-opens java.base/java.nio=org.apache.arrow.memory.core,ALL-UNNAMED
+
+
+
+
+
diff --git a/src/check_names.py b/src/check_names.py
index f76b881ab..f2ab346c6 100644
--- a/src/check_names.py
+++ b/src/check_names.py
@@ -1,28 +1,56 @@
import os
+import sys
+from typing import List
-def get_java_files(directory):
- java_files = []
- for root, dirs, files in os.walk(directory):
- for f in files:
- if f.endswith('.java'):
- java_files.append(f)
- return java_files
-
-def verify_prefix(prefix, files):
- if len(files) == 0:
- print(prefix + ' directory does not contain any files!')
- exit(-1)
- for f in files:
- if not f.startswith(prefix):
- print('The class name of ' + f + ' does not start with ' + prefix)
- exit(-1)
-
-# TODO: ClickHouse (wait for https://github.com/sqlancer/sqlancer/pull/39)
-verify_prefix('CockroachDB', get_java_files("sqlancer/cockroachdb/"))
-verify_prefix('DuckDB', get_java_files("sqlancer/duckdb"))
-verify_prefix('MariaDB', get_java_files("sqlancer/mariadb/"))
-verify_prefix('MySQL', get_java_files("sqlancer/mysql/"))
-verify_prefix('Postgres', get_java_files("sqlancer/postgres/"))
-verify_prefix('SQLite3', get_java_files("sqlancer/sqlite3/"))
-verify_prefix('TiDB', get_java_files("sqlancer/tidb/"))
+def get_java_files(directory_path: str) -> List[str]:
+ java_files: List[str] = []
+ for root, dirs, files in os.walk(directory_path):
+ for f in files:
+ if f.endswith('.java'):
+ java_files.append(f)
+ return java_files
+
+
+def verify_one_db(prefix: str, files: List[str]):
+ print('checking database, name: {0}, files: {1}'.format(prefix, files))
+ if len(files) == 0:
+ print(prefix + ' directory does not contain any files!', file=sys.stderr)
+ exit(-1)
+ for f in files:
+ if not f.startswith(prefix):
+ print('The class name of ' + f + ' does not start with ' + prefix, file=sys.stderr)
+ exit(-1)
+ print('checking database pass: ', prefix)
+
+
+def verify_all_dbs(name_to_files: dict[str:List[str]]):
+ for db_name, files in name_to_files.items():
+ verify_one_db(db_name, files)
+
+
+if __name__ == '__main__':
+ cwd = os.getcwd()
+ print("Current working directory: {0}".format(cwd))
+ name_to_files: dict[str:List[str]] = dict()
+ name_to_files["Citus"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "citus"))
+ name_to_files["ClickHouse"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "clickhouse"))
+ name_to_files["CnosDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "cnosdb"))
+ name_to_files["CockroachDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "cockroachdb"))
+ name_to_files["Databend"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "databend"))
+ name_to_files["DataFusion"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "datafusion"))
+ name_to_files["DuckDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "duckdb"))
+ name_to_files["H2"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "h2"))
+ name_to_files["HSQLDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "hsqldb"))
+ name_to_files["MariaDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "mariadb"))
+ name_to_files["Materialize"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "materialize"))
+ name_to_files["MySQL"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "mysql"))
+ name_to_files["OceanBase"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "oceanbase"))
+ name_to_files["Postgres"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "postgres"))
+ name_to_files["Presto"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "presto"))
+ name_to_files["QuestDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "questdb"))
+ name_to_files["SQLite3"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "sqlite3"))
+ name_to_files["TiDB"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "tidb"))
+ name_to_files["Y"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "yugabyte")) # has both YCQL and YSQL prefixes
+ name_to_files["Doris"] = get_java_files(os.path.join(cwd, "src", "sqlancer", "doris"))
+ verify_all_dbs(name_to_files)
diff --git a/src/sqlancer/ASTBasedReducer.java b/src/sqlancer/ASTBasedReducer.java
new file mode 100644
index 000000000..876a2da12
--- /dev/null
+++ b/src/sqlancer/ASTBasedReducer.java
@@ -0,0 +1,144 @@
+package sqlancer;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+
+import sqlancer.common.query.Query;
+import sqlancer.common.query.SQLQueryAdapter;
+import sqlancer.transformations.RemoveClausesOfSelect;
+import sqlancer.transformations.RemoveColumnsOfSelect;
+import sqlancer.transformations.RemoveElementsOfExpressionList;
+import sqlancer.transformations.RemoveRowsOfInsert;
+import sqlancer.transformations.RemoveUnions;
+import sqlancer.transformations.RoundDoubleConstant;
+import sqlancer.transformations.SimplifyConstant;
+import sqlancer.transformations.SimplifyExpressions;
+import sqlancer.transformations.Transformation;
+
+public class ASTBasedReducer, O extends DBMSSpecificOptions>, C extends SQLancerDBConnection>
+ implements Reducer {
+
+ private final DatabaseProvider provider;
+
+ @SuppressWarnings("unused")
+ private G state;
+ private G newGlobalState;
+ private Reproducer reproducer;
+
+ private List> reducedStatements;
+ // statement after reduction.
+
+ public ASTBasedReducer(DatabaseProvider provider) {
+ this.provider = provider;
+ }
+
+ @SuppressWarnings("unchecked")
+ private void updateStatements(String queryString, int index) {
+ boolean couldAffectSchema = queryString.contains("CREATE TABLE") || queryString.contains("EXPLAIN");
+ reducedStatements.set(index, (Query) new SQLQueryAdapter(queryString, couldAffectSchema));
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void reduce(G state, Reproducer reproducer, G newGlobalState) throws Exception {
+ this.state = state;
+ this.newGlobalState = newGlobalState;
+ this.reproducer = reproducer;
+
+ long maxReduceTime = state.getOptions().getMaxStatementReduceTime();
+ long maxReduceSteps = state.getOptions().getMaxStatementReduceSteps();
+
+ List> initialBugInducingStatements = state.getState().getStatements();
+ newGlobalState.getState().setStatements(new ArrayList<>(initialBugInducingStatements));
+
+ List transformations = new ArrayList<>();
+
+ transformations.add(new RemoveUnions());
+ transformations.add(new RemoveClausesOfSelect());
+ transformations.add(new RemoveRowsOfInsert());
+ transformations.add(new RemoveColumnsOfSelect());
+ transformations.add(new RemoveElementsOfExpressionList());
+ transformations.add(new SimplifyExpressions());
+ transformations.add(new SimplifyConstant());
+ transformations.add(new RoundDoubleConstant());
+
+ Transformation.setBugJudgement(() -> {
+ try {
+ return this.bugStillTriggers();
+ } catch (Exception ignored) {
+ }
+ return false;
+ });
+
+ boolean observeChange;
+ reducedStatements = new ArrayList<>();
+ for (Query> query : initialBugInducingStatements) {
+ reducedStatements.add((Query) query);
+ }
+
+ Instant startTime = Instant.now();
+ reduceProcess: do {
+ observeChange = false;
+ for (Transformation t : transformations) {
+ for (int i = 0; i < reducedStatements.size(); i++) {
+
+ Instant currentTime = Instant.now();
+ if (maxReduceTime != MainOptions.NO_REDUCE_LIMIT
+ && Duration.between(startTime, currentTime).getSeconds() >= maxReduceTime) {
+ break reduceProcess;
+ }
+
+ if (maxReduceSteps != MainOptions.NO_REDUCE_LIMIT
+ && Transformation.getReduceSteps() >= maxReduceSteps) {
+ break reduceProcess;
+ }
+
+ Query> query = reducedStatements.get(i);
+ boolean initFlag = t.init(query.getQueryString());
+ int index = i;
+ t.setStatementChangedCallBack((statementString) -> {
+ updateStatements(statementString, index);
+ });
+
+ if (!initFlag) {
+ newGlobalState.getLogger()
+ .logReducer("warning: failed parsing the statement at transformer : " + t);
+ continue;
+ }
+ t.apply();
+ observeChange |= t.changed();
+ }
+ }
+ } while (observeChange);
+
+ newGlobalState.getState().setStatements(new ArrayList<>(reducedStatements));
+ newGlobalState.getLogger().logReduced(newGlobalState.getState());
+ }
+
+ public boolean bugStillTriggers() throws Exception {
+ try (C con2 = provider.createDatabase(newGlobalState)) {
+ newGlobalState.setConnection(con2);
+ List> candidateStatements = new ArrayList<>(reducedStatements);
+ newGlobalState.getState().setStatements(new ArrayList<>(candidateStatements));
+
+ for (Query s : candidateStatements) {
+ try {
+ s.execute(newGlobalState);
+ } catch (Throwable ignoredException) {
+ // ignore
+ }
+ }
+ try {
+ if (reproducer.bugStillTriggers(newGlobalState)) {
+ newGlobalState.getLogger().logReduced(newGlobalState.getState());
+ return true;
+ }
+ } catch (Throwable ignoredException) {
+
+ }
+ }
+ return false;
+ }
+}
diff --git a/src/sqlancer/ComparatorHelper.java b/src/sqlancer/ComparatorHelper.java
index 880d3ae72..cee290924 100644
--- a/src/sqlancer/ComparatorHelper.java
+++ b/src/sqlancer/ComparatorHelper.java
@@ -48,7 +48,8 @@ public static List getResultSetFirstColumnAsString(String queryString, E
e.printStackTrace();
}
}
- SQLQueryAdapter q = new SQLQueryAdapter(queryString, errors);
+ boolean canonicalizeString = state.getOptions().canonicalizeSqlString();
+ SQLQueryAdapter q = new SQLQueryAdapter(queryString, errors, true, canonicalizeString);
List resultSet = new ArrayList<>();
SQLancerResultSet result = null;
try {
@@ -60,7 +61,7 @@ public static List getResultSetFirstColumnAsString(String queryString, E
String resultTemp = result.getString(1);
if (resultTemp != null) {
resultTemp = resultTemp.replaceAll("[\\.]0+$", ""); // Remove the trailing zeros as many DBMS treat
- // it as non-bugs
+ // it as non-bugs
}
resultSet.add(resultTemp);
}
@@ -69,11 +70,12 @@ public static List getResultSetFirstColumnAsString(String queryString, E
throw e;
}
- if (e.getMessage() == null) {
- throw new AssertionError(queryString, e);
- }
- if (errors.errorIsExpected(e.getMessage())) {
- throw new IgnoreMeException();
+ Throwable current = e;
+ while (current != null) {
+ if (current.getMessage() != null && errors.errorIsExpected(current.getMessage())) {
+ throw new IgnoreMeException();
+ }
+ current = current.getCause();
}
throw new AssertionError(queryString, e);
} finally {
@@ -87,32 +89,42 @@ public static List getResultSetFirstColumnAsString(String queryString, E
public static void assumeResultSetsAreEqual(List resultSet, List secondResultSet,
String originalQueryString, List combinedString, SQLGlobalState, ?> state) {
if (resultSet.size() != secondResultSet.size()) {
- String queryFormatString = "-- %s;\n-- cardinality: %d";
+ String queryFormatString = "-- %s;" + System.lineSeparator() + "-- cardinality: %d"
+ + System.lineSeparator();
String firstQueryString = String.format(queryFormatString, originalQueryString, resultSet.size());
- String secondQueryString = String.format(queryFormatString,
- combinedString.stream().collect(Collectors.joining(";")), secondResultSet.size());
- state.getState().getLocalState().log(String.format("%s\n%s", firstQueryString, secondQueryString));
- String assertionMessage = String.format("the size of the result sets mismatch (%d and %d)!\n%s\n%s",
- resultSet.size(), secondResultSet.size(), firstQueryString, secondQueryString);
+ String combinedQueryString = String.join(";", combinedString);
+ String secondQueryString = String.format(queryFormatString, combinedQueryString, secondResultSet.size());
+ state.getState().getLocalState()
+ .log(String.format("%s" + System.lineSeparator() + "%s", firstQueryString, secondQueryString));
+ String assertionMessage = String.format(
+ "The size of the result sets mismatch (%d and %d)!" + System.lineSeparator()
+ + "First query: \"%s\", whose cardinality is: %d" + System.lineSeparator()
+ + "Second query:\"%s\", whose cardinality is: %d",
+ resultSet.size(), secondResultSet.size(), originalQueryString, resultSet.size(),
+ combinedQueryString, secondResultSet.size());
throw new AssertionError(assertionMessage);
}
Set firstHashSet = new HashSet<>(resultSet);
Set secondHashSet = new HashSet<>(secondResultSet);
- if (!firstHashSet.equals(secondHashSet)) {
+ boolean validateResultSizeOnly = state.getOptions().validateResultSizeOnly();
+ if (!validateResultSizeOnly && !firstHashSet.equals(secondHashSet)) {
Set firstResultSetMisses = new HashSet<>(firstHashSet);
firstResultSetMisses.removeAll(secondHashSet);
Set secondResultSetMisses = new HashSet<>(secondHashSet);
secondResultSetMisses.removeAll(firstHashSet);
- String queryFormatString = "-- %s;\n-- misses: %s";
+
+ String queryFormatString = "-- Query: \"%s\"; It misses: \"%s\"";
String firstQueryString = String.format(queryFormatString, originalQueryString, firstResultSetMisses);
- String secondQueryString = String.format(queryFormatString,
- combinedString.stream().collect(Collectors.joining(";")), secondResultSetMisses);
+ String secondQueryString = String.format(queryFormatString, String.join(";", combinedString),
+ secondResultSetMisses);
// update the SELECT queries to be logged at the bottom of the error log file
- state.getState().getLocalState().log(String.format("%s\n%s", firstQueryString, secondQueryString));
- String assertionMessage = String.format("the content of the result sets mismatch!\n%s\n%s",
- firstQueryString, secondQueryString);
+ state.getState().getLocalState()
+ .log(String.format("%s" + System.lineSeparator() + "%s", firstQueryString, secondQueryString));
+ String assertionMessage = String.format("The content of the result sets mismatch!" + System.lineSeparator()
+ + "First query : \"%s\"" + System.lineSeparator() + "Second query: \"%s\"", originalQueryString,
+ secondQueryString);
throw new AssertionError(assertionMessage);
}
}
@@ -166,4 +178,20 @@ public static List getCombinedResultSetNoDuplicates(String firstQueryStr
return secondResultSet;
}
+ public static String canonicalizeResultValue(String value) {
+ if (value == null) {
+ return value;
+ }
+
+ switch (value) {
+ case "-0.0":
+ return "0.0";
+ case "-0":
+ return "0";
+ default:
+ }
+
+ return value;
+ }
+
}
diff --git a/src/sqlancer/DatabaseProvider.java b/src/sqlancer/DatabaseProvider.java
index 72acb30d8..d169324fa 100644
--- a/src/sqlancer/DatabaseProvider.java
+++ b/src/sqlancer/DatabaseProvider.java
@@ -24,11 +24,25 @@ public interface DatabaseProvider, O extends DBMS
* @param globalState
* the state created and is valid for this method call.
*
+ * @return Reproducer if a bug is found and a reproducer is available.
+ *
* @throws Exception
* if creating the database fails.
*
*/
- void generateAndTestDatabase(G globalState) throws Exception;
+ Reproducer generateAndTestDatabase(G globalState) throws Exception;
+
+ /**
+ * The experimental feature: Query Plan Guidance.
+ *
+ * @param globalState
+ * the state created and is valid for this method call.
+ *
+ * @throws Exception
+ * if testing fails.
+ *
+ */
+ void generateAndTestDatabaseWithQueryPlanGuidance(G globalState) throws Exception;
C createDatabase(G globalState) throws Exception;
diff --git a/src/sqlancer/GlobalState.java b/src/sqlancer/GlobalState.java
index 64b5c731e..2b93012c2 100644
--- a/src/sqlancer/GlobalState.java
+++ b/src/sqlancer/GlobalState.java
@@ -131,7 +131,7 @@ public S getSchema() {
try {
updateSchema();
} catch (Exception e) {
- throw new AssertionError();
+ throw new AssertionError(e.getMessage());
}
}
return schema;
diff --git a/src/sqlancer/Main.java b/src/sqlancer/Main.java
index a4404a6fa..faf35e3c9 100644
--- a/src/sqlancer/Main.java
+++ b/src/sqlancer/Main.java
@@ -5,6 +5,7 @@
import java.io.IOException;
import java.io.Writer;
import java.nio.file.Files;
+import java.nio.file.Path;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
@@ -23,9 +24,31 @@
import com.beust.jcommander.JCommander;
import com.beust.jcommander.JCommander.Builder;
+import sqlancer.citus.CitusProvider;
+import sqlancer.clickhouse.ClickHouseProvider;
+import sqlancer.cnosdb.CnosDBProvider;
+import sqlancer.cockroachdb.CockroachDBProvider;
import sqlancer.common.log.Loggable;
import sqlancer.common.query.Query;
import sqlancer.common.query.SQLancerResultSet;
+import sqlancer.databend.DatabendProvider;
+import sqlancer.doris.DorisProvider;
+import sqlancer.duckdb.DuckDBProvider;
+import sqlancer.h2.H2Provider;
+import sqlancer.hive.HiveProvider;
+import sqlancer.hsqldb.HSQLDBProvider;
+import sqlancer.mariadb.MariaDBProvider;
+import sqlancer.materialize.MaterializeProvider;
+import sqlancer.mysql.MySQLProvider;
+import sqlancer.oceanbase.OceanBaseProvider;
+import sqlancer.postgres.PostgresProvider;
+import sqlancer.presto.PrestoProvider;
+import sqlancer.questdb.QuestDBProvider;
+import sqlancer.spark.SparkProvider;
+import sqlancer.sqlite3.SQLite3Provider;
+import sqlancer.tidb.TiDBProvider;
+import sqlancer.yugabyte.ycql.YCQLProvider;
+import sqlancer.yugabyte.ysql.YSQLProvider;
public final class Main {
@@ -38,7 +61,7 @@ public final class Main {
static boolean progressMonitorStarted;
static {
- System.setProperty(org.slf4j.impl.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "ERROR");
+ System.setProperty(org.slf4j.simple.SimpleLogger.DEFAULT_LOG_LEVEL_KEY, "ERROR");
if (!LOG_DIRECTORY.exists()) {
LOG_DIRECTORY.mkdir();
}
@@ -51,10 +74,19 @@ public static final class StateLogger {
private final File loggerFile;
private File curFile;
+ private File queryPlanFile;
+ private File reduceFile;
private FileWriter logFileWriter;
public FileWriter currentFileWriter;
+ private FileWriter queryPlanFileWriter;
+ private FileWriter reduceFileWriter;
+ private Path reproduceFilePath;
+
private static final List INITIALIZED_PROVIDER_NAMES = new ArrayList<>();
private final boolean logEachSelect;
+ private final boolean logQueryPlan;
+
+ private final boolean useReducer;
private final DatabaseProvider, ?, ?> databaseProvider;
private static final class AlsoWriteToConsoleFileWriter extends FileWriter {
@@ -87,6 +119,25 @@ public StateLogger(String databaseName, DatabaseProvider, ?, ?> provider, Main
if (logEachSelect) {
curFile = new File(dir, databaseName + "-cur.log");
}
+ logQueryPlan = options.logQueryPlan();
+ if (logQueryPlan) {
+ queryPlanFile = new File(dir, databaseName + "-plan.log");
+ }
+ this.useReducer = options.useReducer();
+ if (useReducer) {
+ File reduceFileDir = new File(dir, "reduce");
+ if (!reduceFileDir.exists()) {
+ reduceFileDir.mkdir();
+ }
+ this.reduceFile = new File(reduceFileDir, databaseName + "-reduce.log");
+ }
+ if (options.serializeReproduceState()) {
+ File reproduceFileDir = new File(dir, "reproduce");
+ if (!reproduceFileDir.exists()) {
+ reproduceFileDir.mkdir();
+ }
+ reproduceFilePath = new File(reproduceFileDir, databaseName + ".ser").toPath();
+ }
this.databaseProvider = provider;
}
@@ -138,6 +189,34 @@ public FileWriter getCurrentFileWriter() {
return currentFileWriter;
}
+ public FileWriter getQueryPlanFileWriter() {
+ if (!logQueryPlan) {
+ throw new UnsupportedOperationException();
+ }
+ if (queryPlanFileWriter == null) {
+ try {
+ queryPlanFileWriter = new FileWriter(queryPlanFile, true);
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+ }
+ return queryPlanFileWriter;
+ }
+
+ public FileWriter getReduceFileWriter() {
+ if (!useReducer) {
+ throw new UnsupportedOperationException();
+ }
+ if (reduceFileWriter == null) {
+ try {
+ reduceFileWriter = new FileWriter(reduceFile, false);
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+ }
+ return reduceFileWriter;
+ }
+
public void writeCurrent(StateToReproduce state) {
if (!logEachSelect) {
throw new UnsupportedOperationException();
@@ -172,6 +251,61 @@ private void write(Loggable loggable) {
}
}
+ public void writeQueryPlan(String queryPlan) {
+ if (!logQueryPlan) {
+ throw new UnsupportedOperationException();
+ }
+ try {
+ getQueryPlanFileWriter().append(removeNamesFromQueryPlans(queryPlan));
+ queryPlanFileWriter.flush();
+ } catch (IOException e) {
+ throw new AssertionError();
+ }
+ }
+
+ public void logReducer(String reducerLog) {
+ FileWriter reduceFileWriter = getReduceFileWriter();
+
+ StringBuilder sb = new StringBuilder();
+ sb.append("[reducer log] ");
+ sb.append(reducerLog);
+ try {
+ reduceFileWriter.write(sb.toString());
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ } finally {
+ try {
+ reduceFileWriter.flush();
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+ }
+
+ public void logReduced(StateToReproduce state) {
+ FileWriter reduceFileWriter = getReduceFileWriter();
+
+ StringBuilder sb = new StringBuilder();
+ for (Query> s : state.getStatements()) {
+ sb.append(databaseProvider.getLoggableFactory().createLoggable(s.getLogString()).getLogString());
+ }
+ try {
+ reduceFileWriter.write(sb.toString());
+
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ } finally {
+ try {
+ reduceFileWriter.flush();
+ } catch (IOException e) {
+ // TODO Auto-generated catch block
+ e.printStackTrace();
+ }
+ }
+
+ }
+
public void logException(Throwable reduce, StateToReproduce state) {
Loggable stackTrace = getStackTrace(reduce);
FileWriter logFileWriter2 = getLogFileWriter();
@@ -184,7 +318,6 @@ public void logException(Throwable reduce, StateToReproduce state) {
try {
logFileWriter2.flush();
} catch (IOException e) {
- // TODO Auto-generated catch block
e.printStackTrace();
}
}
@@ -201,8 +334,7 @@ private void printState(FileWriter writer, StateToReproduce state) {
.getInfo(state.getDatabaseName(), state.getDatabaseVersion(), state.getSeedValue()).getLogString());
for (Query> s : state.getStatements()) {
- sb.append(s.getLogString());
- sb.append('\n');
+ sb.append(databaseProvider.getLoggableFactory().createLoggable(s.getLogString()).getLogString());
}
try {
writer.write(sb.toString());
@@ -211,6 +343,17 @@ private void printState(FileWriter writer, StateToReproduce state) {
}
}
+ private String removeNamesFromQueryPlans(String queryPlan) {
+ String result = queryPlan;
+ result = result.replaceAll("t[0-9]+", "t0"); // Avoid duplicate tables
+ result = result.replaceAll("v[0-9]+", "v0"); // Avoid duplicate views
+ result = result.replaceAll("i[0-9]+", "i0"); // Avoid duplicate indexes
+ return result + "\n";
+ }
+
+ public Path getReproduceFilePath() {
+ return reproduceFilePath;
+ }
}
public static class QueryManager {
@@ -222,10 +365,12 @@ public static class QueryManager {
}
public boolean execute(Query q, String... fills) throws Exception {
- globalState.getState().logStatement(q);
boolean success;
success = q.execute(globalState, fills);
Main.nrSuccessfulActions.addAndGet(1);
+ if (globalState.getOptions().loggerPrintFailed() || success) {
+ globalState.getState().logStatement(q);
+ }
return success;
}
@@ -241,6 +386,10 @@ public void incrementSelectQueryCount() {
Main.nrQueries.addAndGet(1);
}
+ public Long getSelectQueryCount() {
+ return Main.nrQueries.get();
+ }
+
public void incrementCreateDatabase() {
Main.nrDatabases.addAndGet(1);
}
@@ -312,13 +461,57 @@ public void run() throws Exception {
if (options.logEachSelect()) {
logger.writeCurrent(state.getState());
}
- provider.generateAndTestDatabase(state);
+ Reproducer reproducer = null;
+ if (options.enableQPG()) {
+ provider.generateAndTestDatabaseWithQueryPlanGuidance(state);
+ } else {
+ reproducer = provider.generateAndTestDatabase(state);
+ }
try {
logger.getCurrentFileWriter().close();
logger.currentFileWriter = null;
} catch (IOException e) {
throw new AssertionError(e);
}
+
+ if (options.serializeReproduceState() && reproducer != null) {
+ stateToRepro.serialize(logger.getReproduceFilePath());
+ }
+ if (options.reduceAST() && !options.useReducer()) {
+ throw new AssertionError("To reduce AST, use-reducer option must be enabled first");
+ }
+ if (options.useReducer()) {
+ if (reproducer == null) {
+ logger.getReduceFileWriter().write("current oracle does not support experimental reducer.");
+ throw new IgnoreMeException();
+ }
+ G newGlobalState = createGlobalState();
+ newGlobalState.setState(stateToRepro);
+ newGlobalState.setRandomly(r);
+ newGlobalState.setDatabaseName(databaseName);
+ newGlobalState.setMainOptions(options);
+ newGlobalState.setDbmsSpecificOptions(command);
+ QueryManager newManager = new QueryManager<>(newGlobalState);
+ newGlobalState.setStateLogger(new StateLogger(databaseName, provider, options));
+ newGlobalState.setManager(newManager);
+
+ Reducer reducer = new StatementReducer<>(provider);
+ reducer.reduce(state, reproducer, newGlobalState);
+
+ if (options.reduceAST()) {
+ Reducer astBasedReducer = new ASTBasedReducer<>(provider);
+ astBasedReducer.reduce(state, reproducer, newGlobalState);
+ }
+
+ try {
+ logger.getReduceFileWriter().close();
+ logger.reduceFileWriter = null;
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+
+ throw new AssertionError("Found a potential bug, please check reducer log for detail.");
+ }
}
}
@@ -419,7 +612,7 @@ public void run() {
System.out.println(
formatInteger(nrSuccessfulActions.get()) + " successfully-executed statements");
System.out.println(
- formatInteger(nrUnsuccessfulActions.get()) + " unsuccessfuly-executed statements");
+ formatInteger(nrUnsuccessfulActions.get()) + " unsuccessfully-executed statements");
}
private String formatInteger(long intValue) {
@@ -498,6 +691,10 @@ private boolean run(MainOptions options, ExecutorService execService,
executor.getStateToReproduce().exception = reduce.getMessage();
executor.getLogger().logFileWriter = null;
executor.getLogger().logException(reduce, executor.getStateToReproduce());
+ if (options.serializeReproduceState()) {
+ executor.getStateToReproduce().logStatement(reduce.getMessage()); // add the error statement
+ executor.getStateToReproduce().serialize(executor.getLogger().getReproduceFilePath());
+ }
return false;
} finally {
try {
@@ -542,9 +739,40 @@ private boolean run(MainOptions options, ExecutorService execService,
for (DatabaseProvider, ?, ?> provider : loader) {
providers.add(provider);
}
+ checkForIssue799(providers);
return providers;
}
+ // see https://github.com/sqlancer/sqlancer/issues/799
+ private static void checkForIssue799(List> providers) {
+ if (providers.isEmpty()) {
+ System.err.println(
+ "No DBMS implementations (i.e., instantiations of the DatabaseProvider class) were found. You likely ran into an issue described in https://github.com/sqlancer/sqlancer/issues/799. As a workaround, I now statically load all supported providers as of June 7, 2023.");
+ providers.add(new CitusProvider());
+ providers.add(new ClickHouseProvider());
+ providers.add(new CnosDBProvider());
+ providers.add(new CockroachDBProvider());
+ providers.add(new DatabendProvider());
+ providers.add(new DorisProvider());
+ providers.add(new DuckDBProvider());
+ providers.add(new H2Provider());
+ providers.add(new HiveProvider());
+ providers.add(new SparkProvider());
+ providers.add(new HSQLDBProvider());
+ providers.add(new MariaDBProvider());
+ providers.add(new MaterializeProvider());
+ providers.add(new MySQLProvider());
+ providers.add(new OceanBaseProvider());
+ providers.add(new PrestoProvider());
+ providers.add(new PostgresProvider());
+ providers.add(new QuestDBProvider());
+ providers.add(new SQLite3Provider());
+ providers.add(new TiDBProvider());
+ providers.add(new YCQLProvider());
+ providers.add(new YSQLProvider());
+ }
+ }
+
private static synchronized void startProgressMonitor() {
if (progressMonitorStarted) {
/*
diff --git a/src/sqlancer/MainOptions.java b/src/sqlancer/MainOptions.java
index ff588de5f..a5142fcf0 100644
--- a/src/sqlancer/MainOptions.java
+++ b/src/sqlancer/MainOptions.java
@@ -1,5 +1,7 @@
package sqlancer;
+import java.util.Objects;
+
import com.beust.jcommander.Parameter;
import com.beust.jcommander.Parameters;
@@ -8,6 +10,8 @@
@Parameters(separators = "=", commandDescription = "Options applicable to all DBMS")
public class MainOptions {
public static final int NO_SET_PORT = -1;
+ public static final int NO_REDUCE_LIMIT = -1;
+ public static final MainOptions DEFAULT_OPTIONS = new MainOptions();
@Parameter(names = { "--help", "-h" }, description = "Lists all supported options and commands", help = true)
private boolean help; // NOPMD
@@ -44,6 +48,24 @@ public class MainOptions {
@Parameter(names = "--log-execution-time", description = "Logs the execution time of each statement (requires --log-each-select to be enabled)", arity = 1)
private boolean logExecutionTime = true; // NOPMD
+ @Parameter(names = "--print-failed", description = "Logs failed insert, create and other statements without results", arity = 1)
+ private boolean loggerPrintFailed = true; // NOPMD
+
+ @Parameter(names = "--qpg-enable", description = "Enable the experimental feature Query Plan Guidance (QPG)", arity = 1)
+ private boolean enableQPG;
+
+ @Parameter(names = "--qpg-log-query-plan", description = "Logs the query plans of each query (requires --qpg-enable)", arity = 1)
+ private boolean logQueryPlan;
+
+ @Parameter(names = "--qpg-max-interval", description = "The maximum number of iterations to mutate tables if no new query plans (requires --qpg-enable)")
+ private static int qpgMaxInterval = 1000;
+
+ @Parameter(names = "--qpg-reward-weight", description = "The weight (0-1) of last reward when updating weighted average reward. A higher value denotes average reward is more affected by the last reward (requires --qpg-enable)")
+ private static double qpgk = 0.25;
+
+ @Parameter(names = "--qpg-selection-probability", description = "The probability (0-1) of the random selection of mutators. A higher value (>0.5) favors exploration over exploitation. (requires --qpg-enable)")
+ private static double qpgProbability = 0.7;
+
@Parameter(names = "--username", description = "The user name used to log into the DBMS")
private String userName = "sqlancer"; // NOPMD
@@ -101,6 +123,33 @@ public class MainOptions {
@Parameter(names = "--database-prefix", description = "The prefix used for each database created")
private String databasePrefix = "database"; // NOPMD
+ @Parameter(names = "--serialize-reproduce-state", description = "Serialize the state to reproduce")
+ private boolean serializeReproduceState = false; // NOPMD
+
+ @Parameter(names = "--use-reducer", description = "EXPERIMENTAL Attempt to reduce queries using a simple reducer")
+ private boolean useReducer = false; // NOPMD
+
+ @Parameter(names = "--reduce-ast", description = "EXPERIMENTAL perform AST reduction after statement reduction")
+ private boolean reduceAST = false; // NOPMD
+
+ @Parameter(names = "--statement-reducer-max-steps", description = "EXPERIMENTAL Maximum steps the statement reducer will do")
+ private long maxStatementReduceSteps = NO_REDUCE_LIMIT; // NOPMD
+
+ @Parameter(names = "--statement-reducer-max-time", description = "EXPERIMENTAL Maximum time duration (secs) the statement reducer will do")
+ private long maxStatementReduceTime = NO_REDUCE_LIMIT; // NOPMD
+
+ @Parameter(names = "--ast-reducer-max-steps", description = "EXPERIMENTAL Maximum steps the AST-based reducer will do")
+ private long maxASTReduceSteps = NO_REDUCE_LIMIT; // NOPMD
+
+ @Parameter(names = "--ast-reducer-max-time", description = "EXPERIMENTAL Maximum time duration (secs) the AST-based reducer will do")
+ private long maxASTReduceTime = NO_REDUCE_LIMIT; // NOPMD
+
+ @Parameter(names = "--validate-result-size-only", description = "Should validate result size only and skip comparing content of the result set", arity = 1)
+ private boolean validateResultSizeOnly = false; // NOPMD
+
+ @Parameter(names = "--canonicalize-sql-strings", description = "Should canonicalize query string (add ';' at the end)", arity = 1)
+ private boolean canonicalizeSqlString = true; // NOPMD
+
public int getMaxExpressionDepth() {
return maxExpressionDepth;
}
@@ -138,6 +187,30 @@ public boolean logExecutionTime() {
return logExecutionTime;
}
+ public boolean loggerPrintFailed() {
+ return loggerPrintFailed;
+ }
+
+ public boolean logQueryPlan() {
+ return logQueryPlan;
+ }
+
+ public boolean enableQPG() {
+ return enableQPG;
+ }
+
+ public int getQPGMaxMutationInterval() {
+ return qpgMaxInterval;
+ }
+
+ public double getQPGk() {
+ return qpgk;
+ }
+
+ public double getQPGProbability() {
+ return qpgProbability;
+ }
+
public int getNrQueries() {
return nrQueries;
}
@@ -218,6 +291,14 @@ public boolean isHelp() {
return help;
}
+ public boolean isDefaultPassword() {
+ return Objects.equals(password, DEFAULT_OPTIONS.password);
+ }
+
+ public boolean isDefaultUsername() {
+ return Objects.equals(userName, DEFAULT_OPTIONS.userName);
+ }
+
public String getDatabasePrefix() {
return databasePrefix;
}
@@ -226,4 +307,40 @@ public boolean performConnectionTest() {
return useConnectionTest;
}
+ public boolean serializeReproduceState() {
+ return serializeReproduceState;
+ }
+
+ public boolean useReducer() {
+ return useReducer;
+ }
+
+ public boolean reduceAST() {
+ return reduceAST;
+ }
+
+ public long getMaxStatementReduceSteps() {
+ return maxStatementReduceSteps;
+ }
+
+ public long getMaxStatementReduceTime() {
+ return maxStatementReduceTime;
+ }
+
+ public long getMaxASTReduceSteps() {
+ return maxASTReduceSteps;
+ }
+
+ public long getMaxASTReduceTime() {
+ return maxASTReduceTime;
+ }
+
+ public boolean validateResultSizeOnly() {
+ return validateResultSizeOnly;
+ }
+
+ public boolean canonicalizeSqlString() {
+ return canonicalizeSqlString;
+ }
+
}
diff --git a/src/sqlancer/OracleFactory.java b/src/sqlancer/OracleFactory.java
index 897293c6b..9d6e1704b 100644
--- a/src/sqlancer/OracleFactory.java
+++ b/src/sqlancer/OracleFactory.java
@@ -4,7 +4,7 @@
public interface OracleFactory> {
- TestOracle create(G globalState) throws Exception;
+ TestOracle create(G globalState) throws Exception;
/**
* Indicates whether the test oracle requires that all tables (including views) contain at least one row.
diff --git a/src/sqlancer/ProviderAdapter.java b/src/sqlancer/ProviderAdapter.java
index a16f8388c..346567300 100644
--- a/src/sqlancer/ProviderAdapter.java
+++ b/src/sqlancer/ProviderAdapter.java
@@ -1,9 +1,14 @@
package sqlancer;
+import java.sql.SQLException;
+import java.util.HashMap;
+import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.stream.Collectors;
import sqlancer.StateToReproduce.OracleRunReproductionState;
+import sqlancer.common.DBMSCommon;
import sqlancer.common.oracle.CompositeTestOracle;
import sqlancer.common.oracle.TestOracle;
import sqlancer.common.schema.AbstractSchema;
@@ -14,7 +19,14 @@ public abstract class ProviderAdapter globalClass;
private final Class optionClass;
- public ProviderAdapter(Class globalClass, Class optionClass) {
+ // Variables for QPG
+ Map queryPlanPool = new HashMap<>();
+ static double[] weightedAverageReward; // static variable for sharing across all threads
+ int currentSelectRewards;
+ int currentSelectCounts;
+ int currentMutationOperator = -1;
+
+ protected ProviderAdapter(Class globalClass, Class optionClass) {
this.globalClass = globalClass;
this.optionClass = optionClass;
}
@@ -35,47 +47,56 @@ public Class getOptionClass() {
}
@Override
- public void generateAndTestDatabase(G globalState) throws Exception {
+ public Reproducer generateAndTestDatabase(G globalState) throws Exception {
try {
generateDatabase(globalState);
checkViewsAreValid(globalState);
globalState.getManager().incrementCreateDatabase();
- TestOracle oracle = getTestOracle(globalState);
+ TestOracle oracle = getTestOracle(globalState);
for (int i = 0; i < globalState.getOptions().getNrQueries(); i++) {
try (OracleRunReproductionState localState = globalState.getState().createLocalState()) {
assert localState != null;
try {
oracle.check();
globalState.getManager().incrementSelectQueryCount();
- } catch (IgnoreMeException e) {
-
+ } catch (IgnoreMeException ignored) {
+ } catch (AssertionError e) {
+ Reproducer reproducer = oracle.getLastReproducer();
+ if (reproducer != null) {
+ return reproducer;
+ }
+ throw e;
}
- assert localState != null;
localState.executedWithoutError();
}
}
} finally {
globalState.getConnection().close();
}
+ return null;
}
- protected abstract void checkViewsAreValid(G globalState);
+ protected abstract void checkViewsAreValid(G globalState) throws SQLException;
- protected TestOracle getTestOracle(G globalState) throws Exception {
+ protected TestOracle getTestOracle(G globalState) throws Exception {
List extends OracleFactory> testOracleFactory = globalState.getDbmsSpecificOptions()
.getTestOracleFactory();
boolean testOracleRequiresMoreThanZeroRows = testOracleFactory.stream()
- .anyMatch(p -> p.requiresAllTablesToContainRows());
+ .anyMatch(OracleFactory::requiresAllTablesToContainRows);
boolean userRequiresMoreThanZeroRows = globalState.getOptions().testOnlyWithMoreThanZeroRows();
boolean checkZeroRows = testOracleRequiresMoreThanZeroRows || userRequiresMoreThanZeroRows;
if (checkZeroRows && globalState.getSchema().containsTableWithZeroRows(globalState)) {
- throw new IgnoreMeException();
+ if (globalState.getOptions().enableQPG()) {
+ addRowsToAllTables(globalState);
+ } else {
+ throw new IgnoreMeException();
+ }
}
if (testOracleFactory.size() == 1) {
return testOracleFactory.get(0).create(globalState);
} else {
- return new CompositeTestOracle(testOracleFactory.stream().map(o -> {
+ return new CompositeTestOracle<>(testOracleFactory.stream().map(o -> {
try {
return o.create(globalState);
} catch (Exception e1) {
@@ -87,4 +108,151 @@ protected TestOracle getTestOracle(G globalState) throws Exception {
public abstract void generateDatabase(G globalState) throws Exception;
+ // QPG: entry function
+ @Override
+ public void generateAndTestDatabaseWithQueryPlanGuidance(G globalState) throws Exception {
+ if (weightedAverageReward == null) {
+ weightedAverageReward = initializeWeightedAverageReward(); // Same length as the list of mutators
+ }
+ try {
+ generateDatabase(globalState);
+ checkViewsAreValid(globalState);
+ globalState.getManager().incrementCreateDatabase();
+
+ Long executedQueryCount = 0L;
+ while (executedQueryCount < globalState.getOptions().getNrQueries()) {
+ int numOfNoNewQueryPlans = 0;
+ TestOracle oracle = getTestOracle(globalState);
+ while (executedQueryCount < globalState.getOptions().getNrQueries()) {
+ try (OracleRunReproductionState localState = globalState.getState().createLocalState()) {
+ assert localState != null;
+ try {
+ oracle.check();
+ String query = oracle.getLastQueryString();
+ executedQueryCount += 1;
+ if (addQueryPlan(query, globalState)) {
+ numOfNoNewQueryPlans = 0;
+ } else {
+ numOfNoNewQueryPlans++;
+ }
+ globalState.getManager().incrementSelectQueryCount();
+ } catch (IgnoreMeException e) {
+
+ }
+ localState.executedWithoutError();
+ }
+ // exit loop to mutate tables if no new query plans have been found after a while
+ if (numOfNoNewQueryPlans > globalState.getOptions().getQPGMaxMutationInterval()) {
+ mutateTables(globalState);
+ break;
+ }
+ }
+ }
+ } finally {
+ globalState.getConnection().close();
+ }
+ }
+
+ // QPG: mutate tables for a new database state
+ private synchronized boolean mutateTables(G globalState) throws Exception {
+ // Update rewards based on a set of newly generated queries in last iteration
+ if (currentMutationOperator != -1) {
+ weightedAverageReward[currentMutationOperator] += ((double) currentSelectRewards
+ / (double) currentSelectCounts) * globalState.getOptions().getQPGk();
+ }
+ currentMutationOperator = -1;
+
+ // Choose mutator based on the rewards
+ int selectedActionIndex = 0;
+ if (Randomly.getPercentage() < globalState.getOptions().getQPGProbability()) {
+ selectedActionIndex = globalState.getRandomly().getInteger(0, weightedAverageReward.length);
+ } else {
+ selectedActionIndex = DBMSCommon.getMaxIndexInDoubleArray(weightedAverageReward);
+ }
+ int reward = 0;
+
+ try {
+ executeMutator(selectedActionIndex, globalState);
+ checkViewsAreValid(globalState); // Remove the invalid views
+ reward = checkQueryPlan(globalState);
+ } catch (IgnoreMeException | AssertionError e) {
+ } finally {
+ // Update rewards based on existing queries associated with the query plan pool
+ updateReward(selectedActionIndex, (double) reward / (double) queryPlanPool.size(), globalState);
+ currentMutationOperator = selectedActionIndex;
+ }
+
+ // Clear the variables for storing the rewards of the action on a set of newly generated queries
+ currentSelectRewards = 0;
+ currentSelectCounts = 0;
+ return true;
+ }
+
+ // QPG: add a query plan to the query plan pool and return true if the query plan is new
+ private boolean addQueryPlan(String selectStr, G globalState) throws Exception {
+ String queryPlan = getQueryPlan(selectStr, globalState);
+
+ if (globalState.getOptions().logQueryPlan()) {
+ globalState.getLogger().writeQueryPlan(queryPlan);
+ }
+
+ currentSelectCounts += 1;
+ if (queryPlanPool.containsKey(queryPlan)) {
+ return false;
+ } else {
+ queryPlanPool.put(queryPlan, selectStr);
+ currentSelectRewards += 1;
+ return true;
+ }
+ }
+
+ // Obtain the reward of the current action based on the queries associated with the query plan pool
+ private int checkQueryPlan(G globalState) throws Exception {
+ int newQueryPlanFound = 0;
+ HashMap modifiedQueryPlan = new HashMap<>();
+ for (Iterator> it = queryPlanPool.entrySet().iterator(); it.hasNext();) {
+ Map.Entry item = it.next();
+ String queryPlan = item.getKey();
+ String selectStr = item.getValue();
+ String newQueryPlan = getQueryPlan(selectStr, globalState);
+ if (newQueryPlan.isEmpty()) { // Invalid query
+ it.remove();
+ } else if (!queryPlan.equals(newQueryPlan)) { // A query plan has been changed
+ it.remove();
+ modifiedQueryPlan.put(newQueryPlan, selectStr);
+ if (!queryPlanPool.containsKey(newQueryPlan)) { // A new query plan is found
+ newQueryPlanFound++;
+ }
+ }
+ }
+ queryPlanPool.putAll(modifiedQueryPlan);
+ return newQueryPlanFound;
+ }
+
+ // QPG: update the reward of current action
+ private void updateReward(int actionIndex, double reward, G globalState) {
+ weightedAverageReward[actionIndex] += (reward - weightedAverageReward[actionIndex])
+ * globalState.getOptions().getQPGk();
+ }
+
+ // QPG: initialize the weighted average reward of all mutation operators (required implementation in specific DBMS)
+ protected double[] initializeWeightedAverageReward() {
+ throw new UnsupportedOperationException();
+ }
+
+ // QPG: obtain the query plan of a query (required implementation in specific DBMS)
+ protected String getQueryPlan(String selectStr, G globalState) throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ // QPG: execute a mutation operator (required implementation in specific DBMS)
+ protected void executeMutator(int index, G globalState) throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
+ // QPG: add rows to all tables (required implementation in specific DBMS when enabling PQS oracle for QPG)
+ protected boolean addRowsToAllTables(G globalState) throws Exception {
+ throw new UnsupportedOperationException();
+ }
+
}
diff --git a/src/sqlancer/Randomly.java b/src/sqlancer/Randomly.java
index fa317f1f0..8494c189a 100644
--- a/src/sqlancer/Randomly.java
+++ b/src/sqlancer/Randomly.java
@@ -1,8 +1,10 @@
package sqlancer;
import java.math.BigDecimal;
+import java.math.BigInteger;
import java.util.ArrayList;
import java.util.Arrays;
+import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.function.Supplier;
@@ -15,6 +17,7 @@ public final class Randomly {
private static int cacheSize = 100;
private final List<Long> cachedLongs = new ArrayList<>();
+ private final List<Integer> cachedIntegers = new ArrayList<>();
private final List<String> cachedStrings = new ArrayList<>();
private final List<Double> cachedDoubles = new ArrayList<>();
private final List<byte[]> cachedBytes = new ArrayList<>();
@@ -29,6 +32,12 @@ private void addToCache(long val) {
}
}
+ private void addToCache(int val) {
+ if (useCaching && cachedIntegers.size() < cacheSize && !cachedIntegers.contains(val)) {
+ cachedIntegers.add(val);
+ }
+ }
+
private void addToCache(double val) {
if (useCaching && cachedDoubles.size() < cacheSize && !cachedDoubles.contains(val)) {
cachedDoubles.add(val);
@@ -49,6 +58,14 @@ private Long getFromLongCache() {
}
}
+ private Integer getFromIntegerCache() {
+ if (!useCaching || cachedIntegers.isEmpty()) {
+ return null;
+ } else {
+ return Randomly.fromList(cachedIntegers);
+ }
+ }
+
private Double getFromDoubleCache() {
if (!useCaching) {
return null;
@@ -118,6 +135,12 @@ public static List nonEmptySubset(List columns, int nr) {
return extractNrRandomColumns(columns, nr);
}
+ public static <T> List<T> nonEmptySubsetLeast(List<T> columns, int min) {
+ int nr = getNextInt(min, columns.size() + 1);
+ assert nr <= columns.size();
+ return extractNrRandomColumns(columns, nr);
+ }
+
public static <T> List<T> nonEmptySubsetPotentialDuplicates(List<T> columns) {
List<T> arr = new ArrayList<>();
for (int i = 0; i < Randomly.smallNumber() + 1; i++) {
@@ -133,17 +156,12 @@ public static List subset(List columns) {
public static <T> List<T> subset(int nr, @SuppressWarnings("unchecked") T... values) {
List<T> list = new ArrayList<>();
- for (T val : values) {
- list.add(val);
- }
+ Collections.addAll(list, values);
return extractNrRandomColumns(list, nr);
}
public static <T> List<T> subset(@SuppressWarnings("unchecked") T... values) {
- List<T> list = new ArrayList<>();
- for (T val : values) {
- list.add(val);
- }
+ List<T> list = new ArrayList<>(Arrays.asList(values));
return subset(list);
}
@@ -204,7 +222,6 @@ public String getString(Randomly r) {
},
ALPHANUMERIC {
-
@Override
public String getString(Randomly r) {
return getStringOfAlphabet(r, ALPHANUMERIC_ALPHABET);
@@ -213,7 +230,6 @@ public String getString(Randomly r) {
},
ALPHANUMERIC_SPECIALCHAR {
-
@Override
public String getString(Randomly r) {
return getStringOfAlphabet(r, ALPHANUMERIC_SPECIALCHAR_ALPHABET);
@@ -350,7 +366,6 @@ public long getNonZeroInteger() {
do {
value = getInteger();
} while (value == 0);
- assert value != 0;
addToCache(value);
return value;
}
@@ -373,6 +388,24 @@ public long getPositiveInteger() {
return value;
}
+ public int getPositiveIntegerInt() {
+ if (cacheProbability()) {
+ Integer value = getFromIntegerCache();
+ if (value != null && value >= 0) {
+ return value;
+ }
+ }
+ int value;
+ if (smallBiasProbability()) {
+ value = Randomly.fromOptions(0, Integer.MAX_VALUE, 1);
+ } else {
+ value = getNextInt(0, Integer.MAX_VALUE);
+ }
+ addToCache(value);
+ assert value >= 0;
+ return value;
+ }
+
public double getFiniteDouble() {
while (true) {
double val = getDouble();
@@ -424,8 +457,19 @@ public long getLong(long left, long right) {
return getNextLong(left, right);
}
+ public BigInteger getBigInteger(BigInteger left, BigInteger right) {
+ if (left.equals(right)) {
+ return left;
+ }
+ BigInteger result = new BigInteger(String.valueOf(getInteger(left.intValue(), right.intValue())));
+ if (result.compareTo(left) < 0 && result.compareTo(right) > 0) {
+ throw new IgnoreMeException();
+ }
+ return result;
+ }
+
public BigDecimal getRandomBigDecimal() {
- return new BigDecimal(getThreadRandom().get().nextDouble());
+ return BigDecimal.valueOf(getThreadRandom().get().nextDouble());
}
public long getPositiveIntegerNotNull() {
@@ -494,7 +538,7 @@ private static long getNextLong(long lower, long upper) {
if (lower == upper) {
return lower;
}
- return (long) (getThreadRandom().get().longs(lower, upper).findFirst().getAsLong());
+ return getThreadRandom().get().longs(lower, upper).findFirst().getAsLong();
}
private static int getNextInt(int lower, int upper) {
diff --git a/src/sqlancer/Reducer.java b/src/sqlancer/Reducer.java
new file mode 100644
index 000000000..0e6589262
--- /dev/null
+++ b/src/sqlancer/Reducer.java
@@ -0,0 +1,7 @@
+package sqlancer;
+
+public interface Reducer<G extends GlobalState<?, ?, ?>> {
+
+ void reduce(G state, Reproducer<G> reproducer, G newGlobalState) throws Exception;
+
+}
diff --git a/src/sqlancer/Reproducer.java b/src/sqlancer/Reproducer.java
new file mode 100644
index 000000000..ef64bd0fe
--- /dev/null
+++ b/src/sqlancer/Reproducer.java
@@ -0,0 +1,5 @@
+package sqlancer;
+
+public interface Reproducer<G extends GlobalState<?, ?, ?>> {
+ boolean bugStillTriggers(G globalState);
+}
diff --git a/src/sqlancer/SQLProviderAdapter.java b/src/sqlancer/SQLProviderAdapter.java
index 8de7523a3..efb4fab67 100644
--- a/src/sqlancer/SQLProviderAdapter.java
+++ b/src/sqlancer/SQLProviderAdapter.java
@@ -10,7 +10,7 @@
public abstract class SQLProviderAdapter>, O extends DBMSSpecificOptions extends OracleFactory>>
extends ProviderAdapter {
- public SQLProviderAdapter(Class globalClass, Class optionClass) {
+ protected SQLProviderAdapter(Class globalClass, Class optionClass) {
super(globalClass, optionClass);
}
diff --git a/src/sqlancer/StandaloneReducer.java b/src/sqlancer/StandaloneReducer.java
new file mode 100644
index 000000000..813160060
--- /dev/null
+++ b/src/sqlancer/StandaloneReducer.java
@@ -0,0 +1,140 @@
+package sqlancer;
+
+import java.io.FileWriter;
+import java.io.PrintWriter;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.ArrayList;
+import java.util.List;
+
+import sqlancer.common.query.Query;
+
+/**
+ * A standalone tool to reduce bug-triggering SQL statements using the delta debugging algorithm.
+ */
+public class StandaloneReducer {
+ private int partitionNum = 2;
+ private final StateToReproduce originalState;
+ private final DatabaseProvider<?, ?, ?> databaseProvider;
+ private final Path outputPath;
+
+ public StandaloneReducer(Path inputPath, Path outputPath) throws Exception {
+ this.originalState = StateToReproduce.deserialize(inputPath);
+ this.databaseProvider = originalState.getDatabaseProvider();
+ if (this.databaseProvider == null) {
+ throw new IllegalStateException("Failed to get database provider from .ser file");
+ }
+ this.outputPath = outputPath != null ? outputPath
+ : Paths.get(inputPath.toString().replaceAll("\\.ser$", ".sql"));
+ }
+
+ /**
+ * Performs the main reduction algorithm using partition-based delta debugging.
+ *
+ * @return List of reduced SQL statements that still trigger bugs.
+ */
+ public List<Query<?>> reduce() throws Exception {
+ List<Query<?>> queries = new ArrayList<>(originalState.getStatements());
+ if (queries.size() <= 1) {
+ return queries;
+ }
+
+ partitionNum = 2;
+ while (queries.size() >= 2) {
+ boolean changedInThisPass = false;
+ List<Query<?>> result = tryReduction(queries);
+
+ if (result.size() < queries.size()) {
+ queries = result;
+ changedInThisPass = true;
+ }
+
+ if (changedInThisPass) {
+ partitionNum = 2;
+ } else {
+ if (partitionNum >= queries.size()) {
+ break;
+ }
+ partitionNum = Math.min(partitionNum * 2, queries.size());
+ }
+ }
+
+ try (PrintWriter writer = new PrintWriter(new FileWriter(outputPath.toFile()))) {
+ for (Query<?> query : queries) {
+ writer.println(query.getQueryString());
+ }
+ }
+ System.out.println("Reduction completed successfully! SQL statements written to: " + outputPath.toString());
+ System.out.println("Final size: " + queries.size() + " statements ("
+ + String.format("%.1f", (1.0 - (double) queries.size() / originalState.getStatements().size()) * 100)
+ + "% reduction)");
+
+ return queries;
+ }
+
+ private List<Query<?>> tryReduction(List<Query<?>> queries) throws Exception {
+ int start = 0;
+ int subLength = queries.size() / partitionNum;
+
+ while (start < queries.size()) {
+ List<Query<?>> candidateQueries = new ArrayList<>(queries);
+ int endPoint = Math.min(start + subLength, candidateQueries.size());
+ candidateQueries.subList(start, endPoint).clear();
+
+ if (testExceptionStillExists(candidateQueries)) {
+ return candidateQueries;
+ }
+
+ start += subLength;
+ }
+
+ return queries;
+ }
+
+ // Test if bug still exists with reduced query set
+ @SuppressWarnings("unchecked")
+ private <G extends GlobalState<O, ?, C>, O extends DBMSSpecificOptions<?>, C extends SQLancerDBConnection> boolean testExceptionStillExists(
+ List<Query<?>> queries) {
+ try {
+ DatabaseProvider<G, O, C> typedProvider = (DatabaseProvider<G, O, C>) databaseProvider;
+ G globalState = typedProvider.getGlobalStateClass().getDeclaredConstructor().newInstance();
+
+ try (C connection = typedProvider.createDatabase(globalState)) {
+ globalState.setConnection(connection);
+ for (Query<?> query : queries) {
+ try {
+ Query<C> typedQuery = (Query<C>) query;
+ typedQuery.execute(globalState);
+ } catch (Throwable e) {
+ // Any exception not declared as an expected error by the query indicates that an (unexpected)
+ // exception still exists
+ return true;
+ }
+ }
+ // No exception occurred
+ return false;
+ }
+ } catch (Throwable e) {
+ return true;
+ }
+ }
+
+ public static void main(String[] args) {
+ try {
+ if (args.length == 0) {
+ System.err.println(
+ "Usage: java -cp target/sqlancer-2.0.0.jar sqlancer.StandaloneReducer [output-file]");
+ System.exit(1);
+ }
+ Path inputPath = Paths.get(args[0]);
+ Path outputPath = args.length > 1 ? Paths.get(args[1]) : null;
+
+ StandaloneReducer reducer = new StandaloneReducer(inputPath, outputPath);
+ reducer.reduce();
+ } catch (Throwable e) {
+ System.err.println("ERROR: " + e.getMessage());
+ e.printStackTrace();
+ System.exit(1);
+ }
+ }
+}
diff --git a/src/sqlancer/StateToReproduce.java b/src/sqlancer/StateToReproduce.java
index f6408ce58..e44d0ccf6 100644
--- a/src/sqlancer/StateToReproduce.java
+++ b/src/sqlancer/StateToReproduce.java
@@ -1,19 +1,26 @@
package sqlancer;
import java.io.Closeable;
+import java.io.IOException;
+import java.io.ObjectInputStream;
+import java.io.ObjectOutputStream;
+import java.io.Serializable;
+import java.nio.file.Files;
+import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import sqlancer.common.query.Query;
-public class StateToReproduce {
+public class StateToReproduce implements Serializable {
+ private static final long serialVersionUID = 1L;
- private final List<Query<?>> statements = new ArrayList<>();
+ private List<Query<?>> statements = new ArrayList<>();
private final String databaseName;
- private final DatabaseProvider<?, ?, ?> databaseProvider;
+ private transient DatabaseProvider<?, ?, ?> databaseProvider;
public String databaseVersion;
@@ -21,7 +28,7 @@ public class StateToReproduce {
String exception;
- public OracleRunReproductionState localState;
+ public transient OracleRunReproductionState localState;
public StateToReproduce(String databaseName, DatabaseProvider, ?, ?> databaseProvider) {
this.databaseName = databaseName;
@@ -40,6 +47,10 @@ public String getDatabaseVersion() {
return databaseVersion;
}
+ public DatabaseProvider<?, ?, ?> getDatabaseProvider() {
+ return databaseProvider;
+ }
+
/**
* Logs the statement string without executing the corresponding statement.
*
@@ -70,6 +81,9 @@ public List> getStatements() {
return Collections.unmodifiableList(statements);
}
+ /**
+ * @deprecated
+ */
@Deprecated
public void commentStatements() {
for (int i = 0; i < statements.size(); i++) {
@@ -100,7 +114,7 @@ public class OracleRunReproductionState implements Closeable {
private final List<Query<?>> statements = new ArrayList<>();
- public boolean success;
+ private boolean success;
public OracleRunReproductionState() {
StateToReproduce.this.localState = this;
@@ -128,4 +142,47 @@ public OracleRunReproductionState createLocalState() {
return new OracleRunReproductionState();
}
+ public void serialize(Path path) {
+ try (ObjectOutputStream oos = new ObjectOutputStream(Files.newOutputStream(path))) {
+ oos.writeObject(this);
+ } catch (IOException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ public static StateToReproduce deserialize(Path path) {
+ try (ObjectInputStream ois = new ObjectInputStream(Files.newInputStream(path))) {
+ return (StateToReproduce) ois.readObject();
+ } catch (IOException | ClassNotFoundException e) {
+ throw new AssertionError(e);
+ }
+ }
+
+ private void writeObject(ObjectOutputStream out) throws IOException {
+ out.defaultWriteObject();
+
+ out.writeObject(this.databaseProvider != null ? this.databaseProvider.getDBMSName() : null);
+ }
+
+ private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
+ in.defaultReadObject();
+ String dbmsName = (String) in.readObject();
+
+ DatabaseProvider<?, ?, ?> provider = null;
+ if (dbmsName != null) {
+ List<DatabaseProvider<?, ?, ?>> providers = Main.getDBMSProviders();
+ for (DatabaseProvider<?, ?, ?> p : providers) {
+ if (p.getDBMSName().equals(dbmsName)) {
+ provider = p;
+ break;
+ }
+ }
+ }
+ this.databaseProvider = provider;
+ }
+
+ public void setStatements(List<Query<?>> statements) {
+ this.statements = statements;
+ }
+
}
diff --git a/src/sqlancer/StatementExecutor.java b/src/sqlancer/StatementExecutor.java
index 26265d136..4f8f48b8f 100644
--- a/src/sqlancer/StatementExecutor.java
+++ b/src/sqlancer/StatementExecutor.java
@@ -70,7 +70,7 @@ public void executeStatements() throws Exception {
success = globalState.executeStatement(query);
} while (nextAction.canBeRetried() && !success
&& nrTries++ < globalState.getOptions().getNrStatementRetryCount());
- } catch (IgnoreMeException e) {
+ } catch (IgnoreMeException ignored) {
}
if (query != null && query.couldAffectSchema()) {
diff --git a/src/sqlancer/StatementReducer.java b/src/sqlancer/StatementReducer.java
new file mode 100644
index 000000000..e066aca84
--- /dev/null
+++ b/src/sqlancer/StatementReducer.java
@@ -0,0 +1,146 @@
+package sqlancer;
+
+import java.time.Duration;
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.List;
+
+import sqlancer.common.query.Query;
+
+public class StatementReducer<G extends GlobalState<O, ?, C>, O extends DBMSSpecificOptions<?>, C extends SQLancerDBConnection>
+ implements Reducer<G> {
+ private final DatabaseProvider<G, O, C> provider;
+ private boolean observedChange;
+ private int partitionNum;
+
+ private long currentReduceSteps;
+ private long currentReduceTime;
+
+ private long maxReduceSteps;
+ private long maxReduceTime;
+
+ Instant timeOfReductionBegins;
+
+ public StatementReducer(DatabaseProvider<G, O, C> provider) {
+ this.provider = provider;
+ }
+
+ private boolean hasNotReachedLimit(long curr, long limit) {
+ if (limit == MainOptions.NO_REDUCE_LIMIT) {
+ return true;
+ }
+ return curr < limit;
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void reduce(G state, Reproducer reproducer, G newGlobalState) throws Exception {
+
+ maxReduceTime = state.getOptions().getMaxStatementReduceTime();
+ maxReduceSteps = state.getOptions().getMaxStatementReduceSteps();
+
+ List<Query<C>> knownToReproduceBugStatements = new ArrayList<>();
+ for (Query<?> stat : state.getState().getStatements()) {
+ knownToReproduceBugStatements.add((Query<C>) stat);
+ }
+
+ // System.out.println("Starting query:");
+ // Main.StateLogger logger = newGlobalState.getLogger();
+ // printQueries(knownToReproduceBugStatements);
+ // System.out.println();
+
+ if (knownToReproduceBugStatements.size() <= 1) {
+ return;
+ }
+
+ timeOfReductionBegins = Instant.now();
+ currentReduceSteps = 0;
+ currentReduceTime = 0;
+ partitionNum = 2;
+
+ while (knownToReproduceBugStatements.size() >= 2 && hasNotReachedLimit(currentReduceSteps, maxReduceSteps)
+ && hasNotReachedLimit(currentReduceTime, maxReduceTime)) {
+ observedChange = false;
+
+ knownToReproduceBugStatements = tryReduction(state, reproducer, newGlobalState,
+ knownToReproduceBugStatements);
+
+ if (!observedChange) {
+ if (partitionNum == knownToReproduceBugStatements.size()) {
+ break;
+ }
+ // increase the search granularity
+ partitionNum = Math.min(partitionNum * 2, knownToReproduceBugStatements.size());
+ }
+ }
+
+ // System.out.println("Reduced query:");
+ // printQueries(knownToReproduceBugStatements);
+ newGlobalState.getState().setStatements(new ArrayList<>(knownToReproduceBugStatements));
+ newGlobalState.getLogger().logReduced(newGlobalState.getState());
+
+ }
+
+ private List<Query<C>> tryReduction(G state, // NOPMD
+ Reproducer<G> reproducer, G newGlobalState, List<Query<C>> knownToReproduceBugStatements) throws Exception {
+
+ List<Query<C>> statements = knownToReproduceBugStatements;
+
+ int start = 0;
+ int subLength = statements.size() / partitionNum;
+ while (start < statements.size()) {
+ // newStatements = candidate[:start] + candidate[start+subLength:]
+ // in other word, remove [start, start+subLength) from candidates
+ try (C con2 = provider.createDatabase(newGlobalState)) {
+ newGlobalState.setConnection(con2);
+ List<Query<C>> candidateStatements = new ArrayList<>(statements);
+ int endPoint = Math.min(start + subLength, candidateStatements.size());
+ candidateStatements.subList(start, endPoint).clear();
+ newGlobalState.getState().setStatements(new ArrayList<>(candidateStatements));
+
+ for (Query<C> s : candidateStatements) {
+ try {
+ s.execute(newGlobalState);
+ } catch (Throwable ignoredException) {
+ // ignore
+ }
+ }
+ try {
+ if (reproducer.bugStillTriggers(newGlobalState)) {
+ observedChange = true;
+ statements = candidateStatements;
+ partitionNum = Math.max(partitionNum - 1, 2);
+ // reproducer.outputHook((SQLite3GlobalState) newGlobalState);
+ newGlobalState.getLogger().logReduced(newGlobalState.getState());
+ break;
+
+ }
+ } catch (Throwable ignoredException) {
+
+ }
+ } catch (Exception e) {
+ e.printStackTrace();
+ }
+
+ currentReduceSteps++;
+ Instant currentInstant = Instant.now();
+
+ currentReduceTime = Duration.between(timeOfReductionBegins, currentInstant).getSeconds();
+ if (!hasNotReachedLimit(currentReduceSteps, maxReduceSteps)
+ || !hasNotReachedLimit(currentReduceTime, maxReduceTime)) {
+ return statements;
+ }
+ start = start + subLength;
+ }
+ return statements;
+ }
+
+ @SuppressWarnings("unused")
+ private void printQueries(List<Query<C>> statements) {
+ System.out.println("===============================");
+ for (Query<?> q : statements) {
+ System.out.println(q.getLogString());
+ }
+ System.out.println("===============================");
+ }
+}
diff --git a/src/sqlancer/arangodb/ArangoDBComparatorHelper.java b/src/sqlancer/arangodb/ArangoDBComparatorHelper.java
deleted file mode 100644
index 2a00a312d..000000000
--- a/src/sqlancer/arangodb/ArangoDBComparatorHelper.java
+++ /dev/null
@@ -1,73 +0,0 @@
-package sqlancer.arangodb;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.IgnoreMeException;
-import sqlancer.Main;
-import sqlancer.arangodb.query.ArangoDBSelectQuery;
-import sqlancer.common.query.ExpectedErrors;
-
-public final class ArangoDBComparatorHelper {
-
- private ArangoDBComparatorHelper() {
-
- }
-
- public static List getResultSetAsDocumentList(ArangoDBSelectQuery query,
- ArangoDBProvider.ArangoDBGlobalState state) throws Exception {
- ExpectedErrors errors = query.getExpectedErrors();
- List result;
- try {
- query.executeAndGet(state);
- Main.nrSuccessfulActions.addAndGet(1);
- result = query.getResultSet();
- return result;
- } catch (Exception e) {
- if (e instanceof IgnoreMeException) {
- throw e;
- }
- Main.nrUnsuccessfulActions.addAndGet(1);
- if (e.getMessage() == null) {
- throw new AssertionError(query.getLogString(), e);
- }
- if (errors.errorIsExpected(e.getMessage())) {
- throw new IgnoreMeException();
- }
- throw new AssertionError(query.getLogString(), e);
- }
-
- }
-
- public static void assumeResultSetsAreEqual(List resultSet, List secondResultSet,
- ArangoDBSelectQuery originalQuery) {
- if (resultSet.size() != secondResultSet.size()) {
- String assertionMessage = String.format("The Size of the result sets mismatch (%d and %d)!\n%s",
- resultSet.size(), secondResultSet.size(), originalQuery.getLogString());
- throw new AssertionError(assertionMessage);
- }
- Set firstHashSet = new HashSet<>(resultSet);
- Set secondHashSet = new HashSet<>(secondResultSet);
-
- if (!firstHashSet.equals(secondHashSet)) {
- Set firstResultSetMisses = new HashSet<>(firstHashSet);
- firstResultSetMisses.removeAll(secondHashSet);
- Set secondResultSetMisses = new HashSet<>(secondHashSet);
- secondResultSetMisses.removeAll(firstHashSet);
- StringBuilder firstMisses = new StringBuilder();
- for (BaseDocument document : firstResultSetMisses) {
- firstMisses.append(document).append(" ");
- }
- StringBuilder secondMisses = new StringBuilder();
- for (BaseDocument document : secondResultSetMisses) {
- secondMisses.append(document).append(" ");
- }
- String assertMessage = String.format("The Content of the result sets mismatch!\n %s \n %s\n %s",
- firstMisses.toString(), secondMisses.toString(), originalQuery.getLogString());
- throw new AssertionError(assertMessage);
- }
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBConnection.java b/src/sqlancer/arangodb/ArangoDBConnection.java
deleted file mode 100644
index b3e5b85d3..000000000
--- a/src/sqlancer/arangodb/ArangoDBConnection.java
+++ /dev/null
@@ -1,31 +0,0 @@
-package sqlancer.arangodb;
-
-import com.arangodb.ArangoDB;
-import com.arangodb.ArangoDatabase;
-
-import sqlancer.SQLancerDBConnection;
-
-public class ArangoDBConnection implements SQLancerDBConnection {
-
- private final ArangoDB client;
- private final ArangoDatabase database;
-
- public ArangoDBConnection(ArangoDB client, ArangoDatabase database) {
- this.client = client;
- this.database = database;
- }
-
- @Override
- public String getDatabaseVersion() throws Exception {
- return client.getVersion().getVersion();
- }
-
- @Override
- public void close() throws Exception {
- client.shutdown();
- }
-
- public ArangoDatabase getDatabase() {
- return database;
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBLoggableFactory.java b/src/sqlancer/arangodb/ArangoDBLoggableFactory.java
deleted file mode 100644
index 927d9f320..000000000
--- a/src/sqlancer/arangodb/ArangoDBLoggableFactory.java
+++ /dev/null
@@ -1,40 +0,0 @@
-package sqlancer.arangodb;
-
-import java.util.Arrays;
-
-import sqlancer.common.log.Loggable;
-import sqlancer.common.log.LoggableFactory;
-import sqlancer.common.log.LoggedString;
-import sqlancer.common.query.Query;
-
-public class ArangoDBLoggableFactory extends LoggableFactory {
- @Override
- protected Loggable createLoggable(String input, String suffix) {
- return new LoggedString(input + suffix);
- }
-
- @Override
- public Query> getQueryForStateToReproduce(String queryString) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Query> commentOutQuery(Query> query) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- protected Loggable infoToLoggable(String time, String databaseName, String databaseVersion, long seedValue) {
- StringBuilder sb = new StringBuilder();
- sb.append("// Time: ").append(time).append("\n");
- sb.append("// Database: ").append(databaseName).append("\n");
- sb.append("// Database version: ").append(databaseVersion).append("\n");
- sb.append("// seed value: ").append(seedValue).append("\n");
- return new LoggedString(sb.toString());
- }
-
- @Override
- public Loggable convertStacktraceToLoggable(Throwable throwable) {
- return new LoggedString(Arrays.toString(throwable.getStackTrace()) + "\n" + throwable.getMessage());
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBOptions.java b/src/sqlancer/arangodb/ArangoDBOptions.java
deleted file mode 100644
index b821fe8c3..000000000
--- a/src/sqlancer/arangodb/ArangoDBOptions.java
+++ /dev/null
@@ -1,50 +0,0 @@
-package sqlancer.arangodb;
-
-import static sqlancer.arangodb.ArangoDBOptions.ArangoDBOracleFactory.QUERY_PARTITIONING;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import com.beust.jcommander.Parameter;
-import com.beust.jcommander.Parameters;
-
-import sqlancer.DBMSSpecificOptions;
-import sqlancer.OracleFactory;
-import sqlancer.arangodb.ArangoDBProvider.ArangoDBGlobalState;
-import sqlancer.arangodb.test.ArangoDBQueryPartitioningWhereTester;
-import sqlancer.common.oracle.CompositeTestOracle;
-import sqlancer.common.oracle.TestOracle;
-
-@Parameters(commandDescription = "ArangoDB (experimental)")
-public class ArangoDBOptions implements DBMSSpecificOptions {
-
- @Parameter(names = "--oracle")
- public List oracles = Arrays.asList(QUERY_PARTITIONING);
-
- @Parameter(names = "--test-random-type-inserts", description = "Insert random types instead of schema types.")
- public boolean testRandomTypeInserts;
-
- @Parameter(names = "--max-number-indexes", description = "The maximum number of indexes used.", arity = 1)
- public int maxNumberIndexes = 15;
-
- @Parameter(names = "--with-optimizer-rule-tests", description = "Adds an additional query, where a random set"
- + "of optimizer rules are disabled.", arity = 1)
- public boolean withOptimizerRuleTests;
-
- @Override
- public List getTestOracleFactory() {
- return oracles;
- }
-
- public enum ArangoDBOracleFactory implements OracleFactory {
- QUERY_PARTITIONING {
- @Override
- public TestOracle create(ArangoDBGlobalState globalState) throws Exception {
- List oracles = new ArrayList<>();
- oracles.add(new ArangoDBQueryPartitioningWhereTester(globalState));
- return new CompositeTestOracle(oracles, globalState);
- }
- }
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBProvider.java b/src/sqlancer/arangodb/ArangoDBProvider.java
deleted file mode 100644
index 7bdc2fa01..000000000
--- a/src/sqlancer/arangodb/ArangoDBProvider.java
+++ /dev/null
@@ -1,137 +0,0 @@
-package sqlancer.arangodb;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import com.arangodb.ArangoDB;
-import com.arangodb.ArangoDatabase;
-import com.google.auto.service.AutoService;
-
-import sqlancer.AbstractAction;
-import sqlancer.DatabaseProvider;
-import sqlancer.ExecutionTimer;
-import sqlancer.GlobalState;
-import sqlancer.IgnoreMeException;
-import sqlancer.ProviderAdapter;
-import sqlancer.Randomly;
-import sqlancer.StatementExecutor;
-import sqlancer.arangodb.gen.ArangoDBCreateIndexGenerator;
-import sqlancer.arangodb.gen.ArangoDBInsertGenerator;
-import sqlancer.arangodb.gen.ArangoDBTableGenerator;
-import sqlancer.common.log.LoggableFactory;
-import sqlancer.common.query.Query;
-
-@AutoService(DatabaseProvider.class)
-public class ArangoDBProvider
- extends ProviderAdapter {
-
- public ArangoDBProvider() {
- super(ArangoDBGlobalState.class, ArangoDBOptions.class);
- }
-
- enum Action implements AbstractAction {
- INSERT(ArangoDBInsertGenerator::getQuery), CREATE_INDEX(ArangoDBCreateIndexGenerator::getQuery);
-
- private final ArangoDBQueryProvider queryProvider;
-
- Action(ArangoDBQueryProvider queryProvider) {
- this.queryProvider = queryProvider;
- }
-
- @Override
- public Query> getQuery(ArangoDBGlobalState globalState) throws Exception {
- return queryProvider.getQuery(globalState);
- }
- }
-
- private static int mapActions(ArangoDBGlobalState globalState, Action a) {
- Randomly r = globalState.getRandomly();
- switch (a) {
- case INSERT:
- return r.getInteger(0, globalState.getOptions().getMaxNumberInserts());
- case CREATE_INDEX:
- return r.getInteger(0, globalState.getDbmsSpecificOptions().maxNumberIndexes);
- default:
- throw new AssertionError(a);
- }
- }
-
- public static class ArangoDBGlobalState extends GlobalState {
-
- private final List schemaTables = new ArrayList<>();
-
- public void addTable(ArangoDBSchema.ArangoDBTable table) {
- schemaTables.add(table);
- }
-
- @Override
- protected void executeEpilogue(Query> q, boolean success, ExecutionTimer timer) throws Exception {
- boolean logExecutionTime = getOptions().logExecutionTime();
- if (success && getOptions().printSucceedingStatements()) {
- System.out.println(q.getLogString());
- }
- if (logExecutionTime) {
- getLogger().writeCurrent("//" + timer.end().asString());
- }
- if (q.couldAffectSchema()) {
- updateSchema();
- }
- }
-
- @Override
- protected ArangoDBSchema readSchema() throws Exception {
- return new ArangoDBSchema(schemaTables);
- }
- }
-
- @Override
- protected void checkViewsAreValid(ArangoDBGlobalState globalState) {
-
- }
-
- @Override
- public void generateDatabase(ArangoDBGlobalState globalState) throws Exception {
- for (int i = 0; i < Randomly.fromOptions(4, 5, 6); i++) {
- boolean success;
- do {
- ArangoDBQueryAdapter queryAdapter = new ArangoDBTableGenerator().getQuery(globalState);
- success = globalState.executeStatement(queryAdapter);
- } while (!success);
- }
- StatementExecutor se = new StatementExecutor<>(globalState, Action.values(),
- ArangoDBProvider::mapActions, (q) -> {
- if (globalState.getSchema().getDatabaseTables().isEmpty()) {
- throw new IgnoreMeException();
- }
- });
- se.executeStatements();
- }
-
- @Override
- public ArangoDBConnection createDatabase(ArangoDBGlobalState globalState) throws Exception {
- ArangoDB arangoDB = new ArangoDB.Builder().user(globalState.getOptions().getUserName())
- .password(globalState.getOptions().getPassword()).build();
- ArangoDatabase database = arangoDB.db(globalState.getDatabaseName());
- try {
- database.drop();
- // When the database does not exist, an ArangoDB exception is thrown. Since we are not sure
- // if this is the first time the database is used, the simplest is dropping it and ignoring
- // the exception.
- } catch (Exception ignored) {
-
- }
- arangoDB.createDatabase(globalState.getDatabaseName());
- database = arangoDB.db(globalState.getDatabaseName());
- return new ArangoDBConnection(arangoDB, database);
- }
-
- @Override
- public String getDBMSName() {
- return "arangodb";
- }
-
- @Override
- public LoggableFactory getLoggableFactory() {
- return new ArangoDBLoggableFactory();
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBQueryAdapter.java b/src/sqlancer/arangodb/ArangoDBQueryAdapter.java
deleted file mode 100644
index 34cdb3709..000000000
--- a/src/sqlancer/arangodb/ArangoDBQueryAdapter.java
+++ /dev/null
@@ -1,16 +0,0 @@
-package sqlancer.arangodb;
-
-import sqlancer.common.query.Query;
-
-public abstract class ArangoDBQueryAdapter extends Query {
- @Override
- public String getQueryString() {
- // Should not be called as it is used only in SQL dependent classes
- throw new UnsupportedOperationException();
- }
-
- @Override
- public String getUnterminatedQueryString() {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/src/sqlancer/arangodb/ArangoDBQueryProvider.java b/src/sqlancer/arangodb/ArangoDBQueryProvider.java
deleted file mode 100644
index 94a4ffda3..000000000
--- a/src/sqlancer/arangodb/ArangoDBQueryProvider.java
+++ /dev/null
@@ -1,6 +0,0 @@
-package sqlancer.arangodb;
-
-@FunctionalInterface
-public interface ArangoDBQueryProvider {
- ArangoDBQueryAdapter getQuery(S globalState) throws Exception;
-}
diff --git a/src/sqlancer/arangodb/ArangoDBSchema.java b/src/sqlancer/arangodb/ArangoDBSchema.java
deleted file mode 100644
index 35e251b8b..000000000
--- a/src/sqlancer/arangodb/ArangoDBSchema.java
+++ /dev/null
@@ -1,70 +0,0 @@
-package sqlancer.arangodb;
-
-import java.util.Collections;
-import java.util.List;
-
-import sqlancer.Randomly;
-import sqlancer.common.schema.AbstractSchema;
-import sqlancer.common.schema.AbstractTable;
-import sqlancer.common.schema.AbstractTableColumn;
-import sqlancer.common.schema.AbstractTables;
-import sqlancer.common.schema.TableIndex;
-
-public class ArangoDBSchema extends AbstractSchema {
-
- public enum ArangoDBDataType {
- INTEGER, DOUBLE, STRING, BOOLEAN;
-
- public static ArangoDBDataType getRandom() {
- return Randomly.fromOptions(values());
- }
- }
-
- public static class ArangoDBColumn extends AbstractTableColumn {
-
- private final boolean isId;
- private final boolean isNullable;
-
- public ArangoDBColumn(String name, ArangoDBDataType type, boolean isId, boolean isNullable) {
- super(name, null, type);
- this.isId = isId;
- this.isNullable = isNullable;
- }
-
- public boolean isId() {
- return isId;
- }
-
- public boolean isNullable() {
- return isNullable;
- }
- }
-
- public ArangoDBSchema(List databaseTables) {
- super(databaseTables);
- }
-
- public static class ArangoDBTables extends AbstractTables {
-
- public ArangoDBTables(List tables) {
- super(tables);
- }
- }
-
- public static class ArangoDBTable
- extends AbstractTable {
-
- public ArangoDBTable(String name, List columns, boolean isView) {
- super(name, columns, Collections.emptyList(), isView);
- }
-
- @Override
- public long getNrRows(ArangoDBProvider.ArangoDBGlobalState globalState) {
- throw new UnsupportedOperationException();
- }
- }
-
- public ArangoDBTables getRandomTableNonEmptyTables() {
- return new ArangoDBTables(Randomly.nonEmptySubset(getDatabaseTables()));
- }
-}
diff --git a/src/sqlancer/arangodb/ast/ArangoDBConstant.java b/src/sqlancer/arangodb/ast/ArangoDBConstant.java
deleted file mode 100644
index 351dbd822..000000000
--- a/src/sqlancer/arangodb/ast/ArangoDBConstant.java
+++ /dev/null
@@ -1,108 +0,0 @@
-package sqlancer.arangodb.ast;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.common.ast.newast.Node;
-
-public abstract class ArangoDBConstant implements Node {
- private ArangoDBConstant() {
-
- }
-
- public abstract void setValueInDocument(BaseDocument document, String key);
-
- public abstract Object getValue();
-
- public static class ArangoDBIntegerConstant extends ArangoDBConstant {
-
- private final int value;
-
- public ArangoDBIntegerConstant(int value) {
- this.value = value;
- }
-
- @Override
- public void setValueInDocument(BaseDocument document, String key) {
- document.addAttribute(key, value);
- }
-
- @Override
- public Object getValue() {
- return value;
- }
- }
-
- public static Node createIntegerConstant(int value) {
- return new ArangoDBIntegerConstant(value);
- }
-
- public static class ArangoDBStringConstant extends ArangoDBConstant {
- private final String value;
-
- public ArangoDBStringConstant(String value) {
- this.value = value;
- }
-
- @Override
- public void setValueInDocument(BaseDocument document, String key) {
- document.addAttribute(key, value);
- }
-
- @Override
- public Object getValue() {
- return "'" + value.replace("\\", "\\\\").replace("'", "\\'") + "'";
- }
- }
-
- public static Node createStringConstant(String value) {
- return new ArangoDBStringConstant(value);
- }
-
- public static class ArangoDBBooleanConstant extends ArangoDBConstant {
- private final boolean value;
-
- public ArangoDBBooleanConstant(boolean value) {
- this.value = value;
- }
-
- @Override
- public void setValueInDocument(BaseDocument document, String key) {
- document.addAttribute(key, value);
- }
-
- @Override
- public Object getValue() {
- return value;
- }
- }
-
- public static Node createBooleanConstant(boolean value) {
- return new ArangoDBBooleanConstant(value);
- }
-
- public static class ArangoDBDoubleConstant extends ArangoDBConstant {
- private final double value;
-
- public ArangoDBDoubleConstant(double value) {
- if (Double.isInfinite(value) || Double.isNaN(value)) {
- this.value = 0.0;
- } else {
- this.value = value;
- }
- }
-
- @Override
- public void setValueInDocument(BaseDocument document, String key) {
- document.addAttribute(key, value);
- }
-
- @Override
- public Object getValue() {
- return value;
- }
- }
-
- public static Node createDoubleConstant(double value) {
- return new ArangoDBDoubleConstant(value);
- }
-}
diff --git a/src/sqlancer/arangodb/ast/ArangoDBExpression.java b/src/sqlancer/arangodb/ast/ArangoDBExpression.java
deleted file mode 100644
index facbbfe9e..000000000
--- a/src/sqlancer/arangodb/ast/ArangoDBExpression.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package sqlancer.arangodb.ast;
-
-public interface ArangoDBExpression {
-}
diff --git a/src/sqlancer/arangodb/ast/ArangoDBSelect.java b/src/sqlancer/arangodb/ast/ArangoDBSelect.java
deleted file mode 100644
index 9fb91d553..000000000
--- a/src/sqlancer/arangodb/ast/ArangoDBSelect.java
+++ /dev/null
@@ -1,79 +0,0 @@
-package sqlancer.arangodb.ast;
-
-import java.util.List;
-
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.common.ast.newast.Node;
-
-public class ArangoDBSelect implements Node {
- private List fromColumns;
- private List projectionColumns;
- private boolean hasFilter;
- private Node filterClause;
- private boolean hasComputed;
- private List> computedClause;
-
- public List getFromColumns() {
- if (fromColumns == null || fromColumns.isEmpty()) {
- throw new IllegalStateException();
- }
- return fromColumns;
- }
-
- public void setFromColumns(List fromColumns) {
- if (fromColumns == null || fromColumns.isEmpty()) {
- throw new IllegalStateException();
- }
- this.fromColumns = fromColumns;
- }
-
- public List getProjectionColumns() {
- if (projectionColumns == null) {
- throw new IllegalStateException();
- }
- return projectionColumns;
- }
-
- public void setProjectionColumns(List projectionColumns) {
- if (projectionColumns == null) {
- throw new IllegalStateException();
- }
- this.projectionColumns = projectionColumns;
- }
-
- public void setFilterClause(Node filterClause) {
- if (filterClause == null) {
- hasFilter = false;
- this.filterClause = null;
- return;
- }
- hasFilter = true;
- this.filterClause = filterClause;
- }
-
- public Node getFilterClause() {
- return filterClause;
- }
-
- public boolean hasFilter() {
- return hasFilter;
- }
-
- public void setComputedClause(List> computedColumns) {
- if (computedColumns == null || computedColumns.isEmpty()) {
- hasComputed = false;
- this.computedClause = null;
- return;
- }
- hasComputed = true;
- this.computedClause = computedColumns;
- }
-
- public List> getComputedClause() {
- return computedClause;
- }
-
- public boolean hasComputed() {
- return hasComputed;
- }
-}
diff --git a/src/sqlancer/arangodb/ast/ArangoDBUnsupportedPredicate.java b/src/sqlancer/arangodb/ast/ArangoDBUnsupportedPredicate.java
deleted file mode 100644
index eabd25578..000000000
--- a/src/sqlancer/arangodb/ast/ArangoDBUnsupportedPredicate.java
+++ /dev/null
@@ -1,6 +0,0 @@
-package sqlancer.arangodb.ast;
-
-import sqlancer.common.ast.newast.Node;
-
-public class ArangoDBUnsupportedPredicate implements Node {
-}
diff --git a/src/sqlancer/arangodb/gen/ArangoDBComputedExpressionGenerator.java b/src/sqlancer/arangodb/gen/ArangoDBComputedExpressionGenerator.java
deleted file mode 100644
index 01e2e557a..000000000
--- a/src/sqlancer/arangodb/gen/ArangoDBComputedExpressionGenerator.java
+++ /dev/null
@@ -1,85 +0,0 @@
-package sqlancer.arangodb.gen;
-
-import sqlancer.Randomly;
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.ast.ArangoDBConstant;
-import sqlancer.arangodb.ast.ArangoDBExpression;
-import sqlancer.common.ast.newast.ColumnReferenceNode;
-import sqlancer.common.ast.newast.NewFunctionNode;
-import sqlancer.common.ast.newast.Node;
-import sqlancer.common.gen.UntypedExpressionGenerator;
-
-public class ArangoDBComputedExpressionGenerator
- extends UntypedExpressionGenerator, ArangoDBSchema.ArangoDBColumn> {
- private final ArangoDBProvider.ArangoDBGlobalState globalState;
-
- public ArangoDBComputedExpressionGenerator(ArangoDBProvider.ArangoDBGlobalState globalState) {
- this.globalState = globalState;
- }
-
- @Override
- public Node generateConstant() {
- ArangoDBSchema.ArangoDBDataType dataType = ArangoDBSchema.ArangoDBDataType.getRandom();
- switch (dataType) {
- case INTEGER:
- return ArangoDBConstant.createIntegerConstant((int) globalState.getRandomly().getInteger());
- case BOOLEAN:
- return ArangoDBConstant.createBooleanConstant(Randomly.getBoolean());
- case DOUBLE:
- return ArangoDBConstant.createDoubleConstant(globalState.getRandomly().getDouble());
- case STRING:
- return ArangoDBConstant.createStringConstant(globalState.getRandomly().getString());
- default:
- throw new AssertionError(dataType);
- }
- }
-
- public enum ComputedFunction {
- ADD(2, "+"), MINUS(2, "-"), MULTIPLY(2, "*"), DIVISION(2, "/"), MODULUS(2, "%");
-
- private final int nrArgs;
- private final String operatorName;
-
- ComputedFunction(int nrArgs, String operatorName) {
- this.nrArgs = nrArgs;
- this.operatorName = operatorName;
- }
-
- public static ComputedFunction getRandom() {
- return Randomly.fromOptions(values());
- }
-
- public int getNrArgs() {
- return nrArgs;
- }
-
- public String getOperatorName() {
- return operatorName;
- }
- }
-
- @Override
- protected Node generateExpression(int depth) {
- if (depth >= globalState.getOptions().getMaxExpressionDepth() || Randomly.getBoolean()) {
- return generateLeafNode();
- }
- ComputedFunction function = ComputedFunction.getRandom();
- return new NewFunctionNode<>(generateExpressions(function.getNrArgs(), depth + 1), function);
- }
-
- @Override
- protected Node generateColumn() {
- return new ColumnReferenceNode<>(Randomly.fromList(columns));
- }
-
- @Override
- public Node negatePredicate(Node predicate) {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public Node isNull(Node expr) {
- throw new UnsupportedOperationException();
- }
-}
diff --git a/src/sqlancer/arangodb/gen/ArangoDBCreateIndexGenerator.java b/src/sqlancer/arangodb/gen/ArangoDBCreateIndexGenerator.java
deleted file mode 100644
index 6a1b872da..000000000
--- a/src/sqlancer/arangodb/gen/ArangoDBCreateIndexGenerator.java
+++ /dev/null
@@ -1,18 +0,0 @@
-package sqlancer.arangodb.gen;
-
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.query.ArangoDBCreateIndexQuery;
-
-public final class ArangoDBCreateIndexGenerator {
- private ArangoDBCreateIndexGenerator() {
-
- }
-
- public static ArangoDBQueryAdapter getQuery(ArangoDBProvider.ArangoDBGlobalState globalState) {
- ArangoDBSchema.ArangoDBTable randomTable = globalState.getSchema().getRandomTable();
- ArangoDBSchema.ArangoDBColumn column = randomTable.getRandomColumn();
- return new ArangoDBCreateIndexQuery(column);
- }
-}
diff --git a/src/sqlancer/arangodb/gen/ArangoDBFilterExpressionGenerator.java b/src/sqlancer/arangodb/gen/ArangoDBFilterExpressionGenerator.java
deleted file mode 100644
index 1a2fc4b5e..000000000
--- a/src/sqlancer/arangodb/gen/ArangoDBFilterExpressionGenerator.java
+++ /dev/null
@@ -1,153 +0,0 @@
-package sqlancer.arangodb.gen;
-
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-
-import sqlancer.Randomly;
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.ast.ArangoDBConstant;
-import sqlancer.arangodb.ast.ArangoDBExpression;
-import sqlancer.arangodb.ast.ArangoDBUnsupportedPredicate;
-import sqlancer.common.ast.BinaryOperatorNode;
-import sqlancer.common.ast.newast.ColumnReferenceNode;
-import sqlancer.common.ast.newast.NewBinaryOperatorNode;
-import sqlancer.common.ast.newast.NewUnaryPrefixOperatorNode;
-import sqlancer.common.ast.newast.Node;
-import sqlancer.common.gen.UntypedExpressionGenerator;
-
-public class ArangoDBFilterExpressionGenerator
- extends UntypedExpressionGenerator, ArangoDBSchema.ArangoDBColumn> {
-
- private final ArangoDBProvider.ArangoDBGlobalState globalState;
- private int numberOfComputedVariables;
-
- private enum Expression {
- BINARY_LOGICAL, UNARY_PREFIX, BINARY_COMPARISON
- }
-
- public ArangoDBFilterExpressionGenerator(ArangoDBProvider.ArangoDBGlobalState globalState) {
- this.globalState = globalState;
- }
-
- public void setNumberOfComputedVariables(int numberOfComputedVariables) {
- this.numberOfComputedVariables = numberOfComputedVariables;
- }
-
- @Override
- public Node generateConstant() {
- ArangoDBSchema.ArangoDBDataType dataType = ArangoDBSchema.ArangoDBDataType.getRandom();
- switch (dataType) {
- case INTEGER:
- return ArangoDBConstant.createIntegerConstant((int) globalState.getRandomly().getInteger());
- case BOOLEAN:
- return ArangoDBConstant.createBooleanConstant(Randomly.getBoolean());
- case DOUBLE:
- return ArangoDBConstant.createDoubleConstant(globalState.getRandomly().getDouble());
- case STRING:
- return ArangoDBConstant.createStringConstant(globalState.getRandomly().getString());
- default:
- throw new AssertionError(dataType);
- }
- }
-
- @Override
- protected Node generateExpression(int depth) {
- if (depth >= globalState.getOptions().getMaxExpressionDepth() || Randomly.getBoolean()) {
- return generateLeafNode();
- }
- List possibleOptions = new ArrayList<>(Arrays.asList(Expression.values()));
- Expression expression = Randomly.fromList(possibleOptions);
- switch (expression) {
- case BINARY_COMPARISON:
- BinaryOperatorNode.Operator op = ArangoDBBinaryComparisonOperator.getRandom();
- return new NewBinaryOperatorNode<>(generateExpression(depth + 1), generateExpression(depth + 1), op);
- case UNARY_PREFIX:
- return new NewUnaryPrefixOperatorNode<>(generateExpression(depth + 1),
- ArangoDBUnaryPrefixOperator.getRandom());
- case BINARY_LOGICAL:
- op = ArangoDBBinaryLogicalOperator.getRandom();
- return new NewBinaryOperatorNode<>(generateExpression(depth + 1), generateExpression(depth + 1), op);
- default:
- throw new AssertionError(expression);
- }
- }
-
- @Override
- protected Node generateColumn() {
- ArangoDBSchema.ArangoDBTable dummy = new ArangoDBSchema.ArangoDBTable("", new ArrayList<>(), false);
- if (Randomly.getBoolean() || numberOfComputedVariables == 0) {
- ArangoDBSchema.ArangoDBColumn column = Randomly.fromList(columns);
- return new ColumnReferenceNode<>(column);
- } else {
- int maxNumber = globalState.getRandomly().getInteger(0, numberOfComputedVariables);
- ArangoDBSchema.ArangoDBColumn column = new ArangoDBSchema.ArangoDBColumn("c" + maxNumber,
- ArangoDBSchema.ArangoDBDataType.INTEGER, false, false);
- column.setTable(dummy);
- return new ColumnReferenceNode<>(column);
- }
- }
-
- @Override
- public Node negatePredicate(Node predicate) {
- return new NewUnaryPrefixOperatorNode<>(predicate, ArangoDBUnaryPrefixOperator.NOT);
- }
-
- @Override
- public Node isNull(Node expr) {
- return new ArangoDBUnsupportedPredicate<>();
- }
-
- public enum ArangoDBBinaryComparisonOperator implements BinaryOperatorNode.Operator {
- EQUALS("=="), NOT_EQUALS("!="), LESS_THAN("<"), LESS_OR_EQUAL("<="), GREATER_THAN(">"), GREATER_OR_EQUAL(">=");
-
- private final String representation;
-
- ArangoDBBinaryComparisonOperator(String representation) {
- this.representation = representation;
- }
-
- @Override
- public String getTextRepresentation() {
- return representation;
- }
-
- public static ArangoDBBinaryComparisonOperator getRandom() {
- return Randomly.fromOptions(values());
- }
- }
-
- public enum ArangoDBUnaryPrefixOperator implements BinaryOperatorNode.Operator {
- NOT("!");
-
- private final String representation;
-
- ArangoDBUnaryPrefixOperator(String representation) {
- this.representation = representation;
- }
-
- @Override
- public String getTextRepresentation() {
- return representation;
- }
-
- public static ArangoDBUnaryPrefixOperator getRandom() {
- return Randomly.fromOptions(values());
- }
- }
-
- public enum ArangoDBBinaryLogicalOperator implements BinaryOperatorNode.Operator {
- AND, OR;
-
- @Override
- public String getTextRepresentation() {
- return toString();
- }
-
- public static BinaryOperatorNode.Operator getRandom() {
- return Randomly.fromOptions(values());
- }
- }
-
-}
diff --git a/src/sqlancer/arangodb/gen/ArangoDBInsertGenerator.java b/src/sqlancer/arangodb/gen/ArangoDBInsertGenerator.java
deleted file mode 100644
index 3cfceeed4..000000000
--- a/src/sqlancer/arangodb/gen/ArangoDBInsertGenerator.java
+++ /dev/null
@@ -1,39 +0,0 @@
-package sqlancer.arangodb.gen;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.query.ArangoDBConstantGenerator;
-import sqlancer.arangodb.query.ArangoDBInsertQuery;
-
-public final class ArangoDBInsertGenerator {
-
- private final ArangoDBProvider.ArangoDBGlobalState globalState;
-
- private ArangoDBInsertGenerator(ArangoDBProvider.ArangoDBGlobalState globalState) {
- this.globalState = globalState;
- }
-
- public static ArangoDBQueryAdapter getQuery(ArangoDBProvider.ArangoDBGlobalState globalState) {
- return new ArangoDBInsertGenerator(globalState).generate();
- }
-
- private ArangoDBQueryAdapter generate() {
- BaseDocument result = new BaseDocument();
- ArangoDBSchema.ArangoDBTable table = globalState.getSchema().getRandomTable();
- ArangoDBConstantGenerator constantGenerator = new ArangoDBConstantGenerator(globalState);
-
- for (int i = 0; i < table.getColumns().size(); i++) {
- if (!globalState.getDbmsSpecificOptions().testRandomTypeInserts) {
- constantGenerator.addRandomConstantWithType(result, table.getColumns().get(i).getName(),
- table.getColumns().get(i).getType());
- } else {
- constantGenerator.addRandomConstant(result, table.getColumns().get(i).getName());
- }
- }
-
- return new ArangoDBInsertQuery(table, result);
- }
-}
diff --git a/src/sqlancer/arangodb/gen/ArangoDBTableGenerator.java b/src/sqlancer/arangodb/gen/ArangoDBTableGenerator.java
deleted file mode 100644
index 1236c3ce4..000000000
--- a/src/sqlancer/arangodb/gen/ArangoDBTableGenerator.java
+++ /dev/null
@@ -1,44 +0,0 @@
-package sqlancer.arangodb.gen;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import sqlancer.Randomly;
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.query.ArangoDBCreateTableQuery;
-
-public class ArangoDBTableGenerator {
-
- private ArangoDBSchema.ArangoDBTable table;
- private final List columnsToBeAdded = new ArrayList<>();
-
- public ArangoDBQueryAdapter getQuery(ArangoDBProvider.ArangoDBGlobalState globalState) {
- String tableName = globalState.getSchema().getFreeTableName();
- ArangoDBCreateTableQuery createTableQuery = new ArangoDBCreateTableQuery(tableName);
- table = new ArangoDBSchema.ArangoDBTable(tableName, columnsToBeAdded, false);
- for (int i = 0; i < Randomly.smallNumber() + 1; i++) {
- String columnName = String.format("c%d", i);
- createColumn(columnName);
- }
- globalState.addTable(table);
- return createTableQuery;
- }
-
- private ArangoDBSchema.ArangoDBDataType createColumn(String columnName) {
- ArangoDBSchema.ArangoDBDataType dataType = ArangoDBSchema.ArangoDBDataType.getRandom();
- ArangoDBSchema.ArangoDBColumn newColumn = new ArangoDBSchema.ArangoDBColumn(columnName, dataType, false, false);
- newColumn.setTable(table);
- columnsToBeAdded.add(newColumn);
- return dataType;
- }
-
- public String getTableName() {
- return table.getName();
- }
-
- public ArangoDBSchema.ArangoDBTable getGeneratedTable() {
- return table;
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBConstantGenerator.java b/src/sqlancer/arangodb/query/ArangoDBConstantGenerator.java
deleted file mode 100644
index 406e8adca..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBConstantGenerator.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package sqlancer.arangodb.query;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.Randomly;
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.ast.ArangoDBConstant;
-
-public class ArangoDBConstantGenerator {
- private final ArangoDBProvider.ArangoDBGlobalState globalState;
-
- public ArangoDBConstantGenerator(ArangoDBProvider.ArangoDBGlobalState globalState) {
- this.globalState = globalState;
- }
-
- public void addRandomConstant(BaseDocument document, String key) {
- ArangoDBSchema.ArangoDBDataType type = ArangoDBSchema.ArangoDBDataType.getRandom();
- addRandomConstantWithType(document, key, type);
- }
-
- public void addRandomConstantWithType(BaseDocument document, String key, ArangoDBSchema.ArangoDBDataType dataType) {
- ArangoDBConstant constant;
- switch (dataType) {
- case STRING:
- constant = new ArangoDBConstant.ArangoDBStringConstant(globalState.getRandomly().getString());
- constant.setValueInDocument(document, key);
- return;
- case DOUBLE:
- constant = new ArangoDBConstant.ArangoDBDoubleConstant(globalState.getRandomly().getDouble());
- constant.setValueInDocument(document, key);
- return;
- case BOOLEAN:
- constant = new ArangoDBConstant.ArangoDBBooleanConstant(Randomly.getBoolean());
- constant.setValueInDocument(document, key);
- return;
- case INTEGER:
- constant = new ArangoDBConstant.ArangoDBIntegerConstant((int) globalState.getRandomly().getInteger());
- constant.setValueInDocument(document, key);
- return;
- default:
- throw new AssertionError(dataType);
- }
-
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBCreateIndexQuery.java b/src/sqlancer/arangodb/query/ArangoDBCreateIndexQuery.java
deleted file mode 100644
index 6c2cc1b75..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBCreateIndexQuery.java
+++ /dev/null
@@ -1,54 +0,0 @@
-package sqlancer.arangodb.query;
-
-import java.util.Collections;
-
-import com.arangodb.ArangoCollection;
-
-import sqlancer.GlobalState;
-import sqlancer.Main;
-import sqlancer.arangodb.ArangoDBConnection;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.common.query.ExpectedErrors;
-
-public class ArangoDBCreateIndexQuery extends ArangoDBQueryAdapter {
-
- private final ArangoDBSchema.ArangoDBColumn column;
-
- public ArangoDBCreateIndexQuery(ArangoDBSchema.ArangoDBColumn column) {
- this.column = column;
- }
-
- @Override
- public boolean couldAffectSchema() {
- return false;
- }
-
- @Override
- public > boolean execute(G globalState, String... fills)
- throws Exception {
- try {
- ArangoCollection collection = globalState.getConnection().getDatabase()
- .collection(column.getTable().getName());
- collection.ensureHashIndex(Collections.singletonList(column.getName()), null);
- Main.nrSuccessfulActions.addAndGet(1);
- return true;
- } catch (Exception e) {
- Main.nrUnsuccessfulActions.addAndGet(1);
- throw e;
- }
- }
-
- @Override
- public ExpectedErrors getExpectedErrors() {
- return new ExpectedErrors();
- }
-
- @Override
- public String getLogString() {
- StringBuilder stringBuilder = new StringBuilder();
- stringBuilder.append("db.").append(column.getTable().getName())
- .append(".ensureIndex({type: \"hash\", fields: [ \"").append(column.getName()).append("\" ]});");
- return stringBuilder.toString();
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBCreateTableQuery.java b/src/sqlancer/arangodb/query/ArangoDBCreateTableQuery.java
deleted file mode 100644
index 00b3276d0..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBCreateTableQuery.java
+++ /dev/null
@@ -1,44 +0,0 @@
-package sqlancer.arangodb.query;
-
-import sqlancer.GlobalState;
-import sqlancer.Main;
-import sqlancer.arangodb.ArangoDBConnection;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.common.query.ExpectedErrors;
-
-public class ArangoDBCreateTableQuery extends ArangoDBQueryAdapter {
-
- private final String tableName;
-
- public ArangoDBCreateTableQuery(String tableName) {
- this.tableName = tableName;
- }
-
- @Override
- public boolean couldAffectSchema() {
- return true;
- }
-
- @Override
- public > boolean execute(G globalState, String... fills)
- throws Exception {
- try {
- globalState.getConnection().getDatabase().createCollection(tableName);
- Main.nrSuccessfulActions.addAndGet(1);
- return true;
- } catch (Exception e) {
- Main.nrUnsuccessfulActions.addAndGet(1);
- throw e;
- }
- }
-
- @Override
- public ExpectedErrors getExpectedErrors() {
- return new ExpectedErrors();
- }
-
- @Override
- public String getLogString() {
- return "db._create(\"" + tableName + "\")";
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBInsertQuery.java b/src/sqlancer/arangodb/query/ArangoDBInsertQuery.java
deleted file mode 100644
index 9a3612062..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBInsertQuery.java
+++ /dev/null
@@ -1,66 +0,0 @@
-package sqlancer.arangodb.query;
-
-import java.util.Map;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.GlobalState;
-import sqlancer.Main;
-import sqlancer.arangodb.ArangoDBConnection;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.common.query.ExpectedErrors;
-
-public class ArangoDBInsertQuery extends ArangoDBQueryAdapter {
-
- private final ArangoDBSchema.ArangoDBTable table;
- private final BaseDocument documentToBeInserted;
-
- public ArangoDBInsertQuery(ArangoDBSchema.ArangoDBTable table, BaseDocument documentToBeInserted) {
- this.table = table;
- this.documentToBeInserted = documentToBeInserted;
- }
-
- @Override
- public boolean couldAffectSchema() {
- return true;
- }
-
- @Override
- public > boolean execute(G globalState, String... fills)
- throws Exception {
- try {
- globalState.getConnection().getDatabase().collection(table.getName()).insertDocument(documentToBeInserted);
- Main.nrSuccessfulActions.addAndGet(1);
- return true;
- } catch (Exception e) {
- Main.nrUnsuccessfulActions.addAndGet(1);
- throw e;
- }
- }
-
- @Override
- public ExpectedErrors getExpectedErrors() {
- return new ExpectedErrors();
- }
-
- @Override
- public String getLogString() {
- StringBuilder stringBuilder = new StringBuilder();
- stringBuilder.append("db._query(\"INSERT { ");
- String filler = "";
- for (Map.Entry stringObjectEntry : documentToBeInserted.getProperties().entrySet()) {
- stringBuilder.append(filler);
- filler = ", ";
- stringBuilder.append(stringObjectEntry.getKey()).append(": ");
- Object value = stringObjectEntry.getValue();
- if (value instanceof String) {
- stringBuilder.append("'").append(value).append("'");
- } else {
- stringBuilder.append(value);
- }
- }
- stringBuilder.append("} IN ").append(table.getName()).append("\")");
- return stringBuilder.toString();
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBOptimizerRules.java b/src/sqlancer/arangodb/query/ArangoDBOptimizerRules.java
deleted file mode 100644
index 835849b92..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBOptimizerRules.java
+++ /dev/null
@@ -1,57 +0,0 @@
-package sqlancer.arangodb.query;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import sqlancer.Randomly;
-
-public class ArangoDBOptimizerRules {
-
- private final List allRules = new ArrayList<>();
-
- public ArangoDBOptimizerRules() {
- // SRC:
- // https://www.arangodb.com/docs/stable/aql/execution-and-performance-optimizer.html#list-of-optimizer-rules
- // Filtered out irrelevant ones
- allRules.add("-fuse-filters");
- // allRules.add("-geo-index-optimizer");
- // allRules.add("-handle-arangosearch-views");
- // allRules.add("-inline-subqueries");
- allRules.add("-interchange-adjacent-enumerations");
- allRules.add("-late-document-materialization");
- // allRules.add("-late-document-materialization-arangosearch");
- allRules.add("-move-calculations-down");
- allRules.add("-move-calculations-up");
- allRules.add("-move-filters-into-enumerate");
- allRules.add("-move-filters-up");
- // allRules.add("-optimize-count");
- // allRules.add("-optimize-subqueries");
- // allRules.add("-optimize-traversals");
- // allRules.add("-patch-update-statements");
- allRules.add("-propagate-constant-attributes");
- allRules.add("-reduce-extraction-to-projection");
- // allRules.add("-remove-collect-variables");
- // allRules.add("-remove-data-modification-out-variables");
- allRules.add("-remove-filter-covered-by-index");
- // allRules.add("-remove-filter-covered-by-traversal");
- allRules.add("-remove-redundant-calculations");
- allRules.add("-remove-redundant-or");
- // allRules.add("-remove-redundant-path-var");
- // allRules.add("-remove-redundant-sorts");
- // allRules.add("-remove-sort-rand");
- allRules.add("-remove-unnecessary-calculations");
- allRules.add("-remove-unnecessary-filters");
- // allRules.add("-replace-function-with-index");
- allRules.add("-replace-or-with-in");
- allRules.add("-simplify-conditions");
- // allRules.add("-sort-in-values");
- // allRules.add("-sort-limit");
- // allRules.add("-splice-subqueries");
- // allRules.add("-use-index-for-sort");
- allRules.add("-use-indexes");
- }
-
- public List getRandomRules() {
- return Randomly.subset(allRules);
- }
-}
diff --git a/src/sqlancer/arangodb/query/ArangoDBSelectQuery.java b/src/sqlancer/arangodb/query/ArangoDBSelectQuery.java
deleted file mode 100644
index 4725e4178..000000000
--- a/src/sqlancer/arangodb/query/ArangoDBSelectQuery.java
+++ /dev/null
@@ -1,88 +0,0 @@
-package sqlancer.arangodb.query;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.stream.Collectors;
-
-import com.arangodb.ArangoCursor;
-import com.arangodb.entity.BaseDocument;
-import com.arangodb.model.AqlQueryOptions;
-
-import sqlancer.GlobalState;
-import sqlancer.arangodb.ArangoDBConnection;
-import sqlancer.arangodb.ArangoDBQueryAdapter;
-import sqlancer.common.query.ExpectedErrors;
-import sqlancer.common.query.SQLancerResultSet;
-
-public class ArangoDBSelectQuery extends ArangoDBQueryAdapter {
-
- private final String query;
-
- private List optimizerRules;
-
- private List resultSet;
-
- public ArangoDBSelectQuery(String query) {
- this.query = query;
- optimizerRules = new ArrayList<>();
- }
-
- @Override
- public boolean couldAffectSchema() {
- return false;
- }
-
- @Override
- public > boolean execute(G globalState, String... fills)
- throws Exception {
- throw new UnsupportedOperationException();
- }
-
- @Override
- public ExpectedErrors getExpectedErrors() {
- return new ExpectedErrors();
- }
-
- @Override
- public String getLogString() {
- if (optimizerRules.isEmpty()) {
- return "db._query(\"" + query + "\")";
- } else {
- String rules = optimizerRules.stream().map(Object::toString).collect(Collectors.joining("\",\""));
- return "db._query(\"" + query + "\", null, { optimizer: { rules: [\"" + rules + "\"] } } )";
- }
- }
-
- @Override
- public > SQLancerResultSet executeAndGet(G globalState,
- String... fills) throws Exception {
- if (globalState.getOptions().logEachSelect()) {
- globalState.getLogger().writeCurrent(this.getLogString());
- try {
- globalState.getLogger().getCurrentFileWriter().flush();
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- ArangoCursor cursor;
- if (optimizerRules.isEmpty()) {
- cursor = globalState.getConnection().getDatabase().query(query, BaseDocument.class);
- } else {
- AqlQueryOptions options = new AqlQueryOptions();
- cursor = globalState.getConnection().getDatabase().query(query, options.rules(optimizerRules),
- BaseDocument.class);
- }
- resultSet = cursor.asListRemaining();
- return null;
- }
-
- public List getResultSet() {
- return resultSet;
- }
-
- public void excludeRandomOptRules() {
- optimizerRules = new ArangoDBOptimizerRules().getRandomRules();
- }
-}
diff --git a/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningBase.java b/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningBase.java
deleted file mode 100644
index f583ed04f..000000000
--- a/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningBase.java
+++ /dev/null
@@ -1,67 +0,0 @@
-package sqlancer.arangodb.test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import sqlancer.Randomly;
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.ast.ArangoDBExpression;
-import sqlancer.arangodb.ast.ArangoDBSelect;
-import sqlancer.arangodb.gen.ArangoDBComputedExpressionGenerator;
-import sqlancer.arangodb.gen.ArangoDBFilterExpressionGenerator;
-import sqlancer.common.ast.newast.Node;
-import sqlancer.common.gen.ExpressionGenerator;
-import sqlancer.common.oracle.TernaryLogicPartitioningOracleBase;
-import sqlancer.common.oracle.TestOracle;
-
-public class ArangoDBQueryPartitioningBase
- extends TernaryLogicPartitioningOracleBase, ArangoDBProvider.ArangoDBGlobalState>
- implements TestOracle {
-
- protected ArangoDBSchema schema;
- protected List targetColumns;
- protected ArangoDBFilterExpressionGenerator expressionGenerator;
- protected ArangoDBSelect select;
- protected int numberComputedColumns;
-
- protected ArangoDBQueryPartitioningBase(ArangoDBProvider.ArangoDBGlobalState state) {
- super(state);
- }
-
- @Override
- protected ExpressionGenerator> getGen() {
- return expressionGenerator;
- }
-
- @Override
- public void check() throws Exception {
- numberComputedColumns = state.getRandomly().getInteger(0, 4);
- schema = state.getSchema();
- generateTargetColumns();
- expressionGenerator = new ArangoDBFilterExpressionGenerator(state).setColumns(targetColumns);
- expressionGenerator.setNumberOfComputedVariables(numberComputedColumns);
- initializeTernaryPredicateVariants();
- select = new ArangoDBSelect<>();
- select.setFromColumns(targetColumns);
- select.setProjectionColumns(Randomly.nonEmptySubset(targetColumns));
- generateComputedClause();
- }
-
- private void generateComputedClause() {
- List> computedColumns = new ArrayList<>();
- ArangoDBComputedExpressionGenerator generator = new ArangoDBComputedExpressionGenerator(state);
- generator.setColumns(targetColumns);
- for (int i = 0; i < numberComputedColumns; i++) {
- computedColumns.add(generator.generateExpression());
- }
- select.setComputedClause(computedColumns);
- }
-
- private void generateTargetColumns() {
- ArangoDBSchema.ArangoDBTables targetTables;
- targetTables = schema.getRandomTableNonEmptyTables();
- List allColumns = targetTables.getColumns();
- targetColumns = Randomly.nonEmptySubset(allColumns);
- }
-}
diff --git a/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningWhereTester.java b/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningWhereTester.java
deleted file mode 100644
index 6ad19fabf..000000000
--- a/src/sqlancer/arangodb/test/ArangoDBQueryPartitioningWhereTester.java
+++ /dev/null
@@ -1,46 +0,0 @@
-package sqlancer.arangodb.test;
-
-import static sqlancer.arangodb.ArangoDBComparatorHelper.assumeResultSetsAreEqual;
-import static sqlancer.arangodb.ArangoDBComparatorHelper.getResultSetAsDocumentList;
-
-import java.util.List;
-
-import com.arangodb.entity.BaseDocument;
-
-import sqlancer.arangodb.ArangoDBProvider;
-import sqlancer.arangodb.query.ArangoDBSelectQuery;
-import sqlancer.arangodb.visitor.ArangoDBVisitor;
-
-public class ArangoDBQueryPartitioningWhereTester extends ArangoDBQueryPartitioningBase {
- public ArangoDBQueryPartitioningWhereTester(ArangoDBProvider.ArangoDBGlobalState state) {
- super(state);
- }
-
- @Override
- public void check() throws Exception {
- super.check();
- select.setFilterClause(null);
-
- ArangoDBSelectQuery query = ArangoDBVisitor.asSelectQuery(select);
- List firstResultSet = getResultSetAsDocumentList(query, state);
-
- select.setFilterClause(predicate);
- query = ArangoDBVisitor.asSelectQuery(select);
- List secondResultSet = getResultSetAsDocumentList(query, state);
-
- select.setFilterClause(negatedPredicate);
- query = ArangoDBVisitor.asSelectQuery(select);
- List thirdResultSet = getResultSetAsDocumentList(query, state);
-
- thirdResultSet.addAll(secondResultSet);
- assumeResultSetsAreEqual(firstResultSet, thirdResultSet, query);
-
- if (state.getDbmsSpecificOptions().withOptimizerRuleTests) {
- select.setFilterClause(predicate);
- query = ArangoDBVisitor.asSelectQuery(select);
- query.excludeRandomOptRules();
- List forthResultSet = getResultSetAsDocumentList(query, state);
- assumeResultSetsAreEqual(secondResultSet, forthResultSet, query);
- }
- }
-}
diff --git a/src/sqlancer/arangodb/visitor/ArangoDBToQueryVisitor.java b/src/sqlancer/arangodb/visitor/ArangoDBToQueryVisitor.java
deleted file mode 100644
index f82995d5e..000000000
--- a/src/sqlancer/arangodb/visitor/ArangoDBToQueryVisitor.java
+++ /dev/null
@@ -1,134 +0,0 @@
-package sqlancer.arangodb.visitor;
-
-import java.util.HashSet;
-import java.util.List;
-import java.util.Set;
-
-import sqlancer.arangodb.ArangoDBSchema;
-import sqlancer.arangodb.ast.ArangoDBConstant;
-import sqlancer.arangodb.ast.ArangoDBExpression;
-import sqlancer.arangodb.ast.ArangoDBSelect;
-import sqlancer.arangodb.gen.ArangoDBComputedExpressionGenerator;
-import sqlancer.arangodb.query.ArangoDBSelectQuery;
-import sqlancer.common.ast.newast.ColumnReferenceNode;
-import sqlancer.common.ast.newast.NewBinaryOperatorNode;
-import sqlancer.common.ast.newast.NewFunctionNode;
-import sqlancer.common.ast.newast.NewUnaryPrefixOperatorNode;
-import sqlancer.common.ast.newast.Node;
-
-public class ArangoDBToQueryVisitor extends ArangoDBVisitor {
-
- private final StringBuilder stringBuilder;
-
- public ArangoDBToQueryVisitor() {
- stringBuilder = new StringBuilder();
- }
-
- @Override
- protected void visit(ArangoDBSelect expression) {
- generateFrom(expression);
- generateComputed(expression);
- generateFilter(expression);
- generateProject(expression);
- }
-
- private void generateFilter(ArangoDBSelect expression) {
- if (expression.hasFilter()) {
- stringBuilder.append("FILTER ");
- visit(expression.getFilterClause());
- stringBuilder.append(" ");
- }
- }
-
- private void generateComputed(ArangoDBSelect expression) {
- if (expression.hasComputed()) {
- List> computedClause = expression.getComputedClause();
- int computedNumber = 0;
- for (Node computedExpression : computedClause) {
- stringBuilder.append("LET c").append(computedNumber).append(" = ");
- visit(computedExpression);
- stringBuilder.append(" ");
- computedNumber++;
- }
- }
- }
-
- @Override
- protected void visit(ColumnReferenceNode expression) {
- if (expression.getColumn().getTable().getName().equals("")) {
- stringBuilder.append(expression.getColumn().getName());
- } else {
- stringBuilder.append("r").append(expression.getColumn().getTable().getName()).append(".")
- .append(expression.getColumn().getName());
- }
- }
-
- @Override
- protected void visit(ArangoDBConstant expression) {
- stringBuilder.append(expression.getValue());
- }
-
- @Override
- protected void visit(NewBinaryOperatorNode expression) {
- stringBuilder.append("(");
- visit(expression.getLeft());
- stringBuilder.append(" ").append(expression.getOperatorRepresentation()).append(" ");
- visit(expression.getRight());
- stringBuilder.append(")");
- }
-
- @Override
- protected void visit(NewUnaryPrefixOperatorNode expression) {
- stringBuilder.append(expression.getOperatorRepresentation()).append("(");
- visit(expression.getExpr());
- stringBuilder.append(")");
- }
-
- @Override
- protected void visit(NewFunctionNode expression) {
- if (!(expression.getFunc() instanceof ArangoDBComputedExpressionGenerator.ComputedFunction)) {
- throw new UnsupportedOperationException();
- }
- ArangoDBComputedExpressionGenerator.ComputedFunction function = (ArangoDBComputedExpressionGenerator.ComputedFunction) expression
- .getFunc();
- // TODO: Support functions with a different number of arguments.
- if (function.getNrArgs() != 2) {
- throw new UnsupportedOperationException();
- }
- stringBuilder.append("(");
- visit(expression.getArgs().get(0));
- stringBuilder.append(" ").append(function.getOperatorName()).append(" ");
- visit(expression.getArgs().get(1));
- stringBuilder.append(")");
- }
-
- private void generateFrom(ArangoDBSelect expression) {
- List forColumns = expression.getFromColumns();
- Set tables = new HashSet<>();
- for (ArangoDBSchema.ArangoDBColumn column : forColumns) {
- tables.add(column.getTable());
- }
-
- for (ArangoDBSchema.ArangoDBTable table : tables) {
- stringBuilder.append("FOR r").append(table.getName()).append(" IN ").append(table.getName()).append(" ");
- }
- }
-
- private void generateProject(ArangoDBSelect expression) {
- List projectColumns = expression.getProjectionColumns();
- stringBuilder.append("RETURN {");
- String filler = "";
- for (ArangoDBSchema.ArangoDBColumn column : projectColumns) {
- stringBuilder.append(filler);
- filler = ", ";
- stringBuilder.append(column.getTable().getName()).append("_").append(column.getName()).append(": r")
- .append(column.getTable().getName()).append(".").append(column.getName());
- }
- stringBuilder.append("}");
- }
-
- public ArangoDBSelectQuery getQuery() {
- return new ArangoDBSelectQuery(stringBuilder.toString());
- }
-
-}
diff --git a/src/sqlancer/arangodb/visitor/ArangoDBVisitor.java b/src/sqlancer/arangodb/visitor/ArangoDBVisitor.java
deleted file mode 100644
index f1db84cf5..000000000
--- a/src/sqlancer/arangodb/visitor/ArangoDBVisitor.java
+++ /dev/null
@@ -1,51 +0,0 @@
-package sqlancer.arangodb.visitor;
-
-import sqlancer.arangodb.ast.ArangoDBConstant;
-import sqlancer.arangodb.ast.ArangoDBExpression;
-import sqlancer.arangodb.ast.ArangoDBSelect;
-import sqlancer.arangodb.query.ArangoDBSelectQuery;
-import sqlancer.common.ast.newast.ColumnReferenceNode;
-import sqlancer.common.ast.newast.NewBinaryOperatorNode;
-import sqlancer.common.ast.newast.NewFunctionNode;
-import sqlancer.common.ast.newast.NewUnaryPrefixOperatorNode;
-import sqlancer.common.ast.newast.Node;
-
-public abstract class ArangoDBVisitor {
-
- protected abstract void visit(ArangoDBSelect expression);
-
- protected abstract void visit(ColumnReferenceNode expression);
-
- protected abstract void visit(ArangoDBConstant expression);
-
- protected abstract void visit(NewBinaryOperatorNode