diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..f284b19 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,51 @@ +# Git files +.git +.gitignore +.gitmodules + +# Python cache +__pycache__ +*.pyc +*.pyo +*.pyd +.Python +*.so +*.egg +*.egg-info +dist +build +.pytest_cache + +# IDEs +.vscode +.idea +*.swp +*.swo +*~ + +# Documentation +*.md +!README.md +docs/ + +# Data and output directories (keep structure but exclude large files) +data/output/ +data/tmp/ +*.avi +*.mp4 +*.png +*.jpg +*.jpeg + +# Logs +*.log + +# OS files +.DS_Store +Thumbs.db + +# Exclude reference projects to reduce build context +refproj/ + +# Development toolkit +weather_dev_toolkit/ diff --git a/.gitignore b/.gitignore index 84554fb..88b11fd 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,9 @@ common/__pycache__ *.prof *.lprof automate_error_*.txt -automate_log_*.txt \ No newline at end of file +automate_log_*.txt +3rdparty/boost_1_62_0/ +3rdparty/boost_1_62_0.tar.gz +3rdparty/osg/ +docker-build*.log +*.lo diff --git a/3rdparty/weather-particle-simulator b/3rdparty/weather-particle-simulator index 6a130aa..19fa03b 160000 --- a/3rdparty/weather-particle-simulator +++ b/3rdparty/weather-particle-simulator @@ -1 +1 @@ -Subproject commit 6a130aa7294dd980940416e8e0b21fef7112efaa +Subproject commit 19fa03b419a32902abbf0ec899ff0ca6e77b56fa diff --git a/DOCKER.md b/DOCKER.md new file mode 100644 index 0000000..36910de --- /dev/null +++ b/DOCKER.md @@ -0,0 +1,267 @@ +# Docker Setup for Weather Particle Simulator + +This Docker container provides a complete environment for the weather-particle-simulator project with all required dependencies pre-installed. + +> **Quick Start**: New to Docker? Check out [QUICKSTART_DOCKER.md](QUICKSTART_DOCKER.md) for a step-by-step guide! +> +> **Convenience**: Use the included [Makefile](Makefile) for easy commands like `make build`, `make run`, and `make verify`. 
+ +## Dependencies Included + +- **Ubuntu 20.04** base image +- **C++11** compiler support +- **OpenCV 3.2.0** - Built from source with optimizations +- **Boost 1.62.0** - Full library installation +- **OpenSceneGraph 3.4.1** - 3D graphics toolkit +- **Python 3** with required packages (pexpect, numpy, opencv-python, pillow) + +## Prerequisites + +- Docker installed on your system ([Install Docker](https://docs.docker.com/get-docker/)) +- Docker Compose (usually included with Docker Desktop) +- At least 10GB of free disk space for the image +- (Optional) NVIDIA Docker runtime for GPU support + +## Building the Container + +### Option 1: Using Docker directly + +```bash +# Build the image (takes ~20-30 minutes on first build) +docker build -t weather-particle-simulator:latest . + +# Verify the build +docker images | grep weather-particle-simulator +``` + +### Option 2: Using Docker Compose + +```bash +# Build the image +docker-compose build + +# Or build with no cache (clean build) +docker-compose build --no-cache +``` + +## Running the Container + +### Interactive Shell + +```bash +# Using Docker +docker run -it --rm \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + /bin/bash + +# Using Docker Compose +docker-compose run --rm weather-simulator +``` + +### Running the Weather Particle Simulator + +```bash +# Test that all dependencies are installed correctly +docker run -it --rm \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + bash -c "cd /workspace/3rdparty/weather-particle-simulator/lin_x64 && ./AHLSimulation" + +# If successful, you'll see the simulator start. Type '0' and press Enter to exit. 
+``` + +### Running the Rain Rendering Project + +```bash +# Using main script +docker run -it --rm \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + python3 main.py + +# Using threaded version +docker run -it --rm \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + python3 main_threaded.py + +# Using Docker Compose +docker-compose run --rm rain-rendering +``` + +## X11 Forwarding (for GUI applications on Linux) + +If you need to run GUI applications (like the weather simulator with visualization): + +```bash +# Allow Docker to access X server +xhost +local:docker + +# Run with X11 forwarding +docker run -it --rm \ + -e DISPLAY=$DISPLAY \ + -v /tmp/.X11-unix:/tmp/.X11-unix:rw \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + bash + +# When done, remove access +xhost -local:docker +``` + +Or use Docker Compose (X11 forwarding is already configured): + +```bash +xhost +local:docker +docker-compose run --rm weather-simulator +xhost -local:docker +``` + +## Volume Mounts + +The container is configured to mount the following directories: + +- Current directory → `/workspace` (main project files) +- `./data` → `/workspace/data` (input/output data) +- `./config` → `/workspace/config` (configuration files) + +All changes in these directories are persisted on your host machine. + +## Troubleshooting + +### Build Issues + +**Problem**: Build fails with "unable to download boost/opencv/osg" +```bash +# Solution: Check your internet connection and retry +docker build --no-cache -t weather-particle-simulator:latest . 
+``` + +**Problem**: Out of disk space +```bash +# Clean up old Docker images and containers +docker system prune -a +``` + +### Runtime Issues + +**Problem**: `./AHLSimulation` fails with library not found +```bash +# Solution: Check that libraries are correctly installed +docker run -it --rm weather-particle-simulator:latest ldconfig -p | grep -E "boost|opencv|osg" +``` + +**Problem**: Permission denied errors +```bash +# Solution: Run with your user ID +docker run -it --rm \ + -u $(id -u):$(id -g) \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + /bin/bash +``` + +**Problem**: X11 forwarding not working +```bash +# On Linux, ensure xhost is allowing connections +xhost +local:docker + +# On macOS, install XQuartz and set DISPLAY +# export DISPLAY=host.docker.internal:0 + +# On Windows, use VcXsrv or Xming +``` + +## Advanced Usage + +### Custom Python Dependencies + +If you need additional Python packages: + +```bash +# Enter the container +docker run -it --rm -v $(pwd):/workspace weather-particle-simulator:latest bash + +# Install packages +pip3 install +``` + +Or modify the Dockerfile and add packages to the `pip3 install` command. + +### Multi-stage Builds + +The Dockerfile uses a multi-stage build to reduce final image size: +- **Builder stage**: Compiles all dependencies from source +- **Runtime stage**: Contains only runtime libraries and binaries + +### Debugging + +```bash +# Enter the builder stage for debugging +docker build --target builder -t weather-particle-simulator:builder . 
+docker run -it --rm weather-particle-simulator:builder /bin/bash +``` + +## Performance Notes + +- **Build time**: First build takes 20-30 minutes depending on your machine +- **Image size**: Final image is approximately 2-3GB +- **Build cache**: Subsequent builds are much faster if dependencies haven't changed +- **CPU usage**: Compilation uses all available cores (`-j$(nproc)`) + +## Updating Dependencies + +To update to different versions, modify the Dockerfile: + +```dockerfile +# For OpenCV (change version in wget URL) +wget -q -O opencv.zip https://github.com/opencv/opencv/archive/X.Y.Z.zip + +# For Boost (change version in URL and folder name) +wget -q https://sourceforge.net/projects/boost/files/boost/X.Y.Z/boost_X_Y_Z.tar.gz + +# For OpenSceneGraph (change version in wget URL) +wget -q -O osg.zip https://github.com/openscenegraph/OpenSceneGraph/archive/OpenSceneGraph-X.Y.Z.zip +``` + +## CI/CD Integration + +### GitHub Actions Example + +```yaml +name: Docker Build + +on: [push] + +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + submodules: recursive + + - name: Build Docker image + run: docker build -t weather-particle-simulator:latest . + + - name: Test installation + run: | + docker run --rm weather-particle-simulator:latest \ + ldconfig -p | grep -E "boost|opencv|osg" +``` + +## License + +This Dockerfile is provided as-is for building the weather-particle-simulator environment. 
Please refer to individual component licenses: +- Weather Particle Simulator: MIT License (Carnegie Mellon University) +- OpenCV: Apache 2.0 License +- Boost: Boost Software License +- OpenSceneGraph: OpenSceneGraph Public License (OSGPL) + +## References + +- [Weather Particle Simulator](https://github.com/astra-vision/weather-particle-simulator) +- [OpenCV Installation Guide](https://linuxize.com/post/how-to-install-opencv-on-ubuntu-18-04/) +- [Boost Installation](https://stackoverflow.com/questions/12578499/how-to-install-boost-on-ubuntu/41272796#41272796) +- [OpenSceneGraph Installation](https://vicrucann.github.io/tutorials/osg-linux-quick-install/) diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..0c1d5b3 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,229 @@ +# Dockerfile for rain-rendering with all dependencies +# Based on Ubuntu 18.04 (Bionic) with C++11, OpenCV 3.2.0, Boost 1.62.0, and OpenSceneGraph 3.4.1 +# Uses AHLSimulation_bionic binary compiled for Ubuntu 18.04 +# +# Build: docker build -t rain-rendering:latest . 
+# Run: docker run -it --rm -v $(pwd):/workspace rain-rendering:latest + +FROM ubuntu:18.04 + +# Prevent interactive prompts during package installation +ENV DEBIAN_FRONTEND=noninteractive +ENV TZ=UTC + +# Install base build tools and dependencies (exactly as specified) +RUN apt-get update && apt-get install -y \ + build-essential \ + g++ \ + cmake \ + git \ + pkg-config \ + libgtk-3-dev \ + libavcodec-dev \ + libavformat-dev \ + libswscale-dev \ + libv4l-dev \ + libxvidcore-dev \ + libx264-dev \ + libjpeg-dev \ + libpng-dev \ + libtiff-dev \ + gfortran \ + openexr \ + libatlas-base-dev \ + python3-dev \ + python3-numpy \ + python3-pip \ + autotools-dev \ + libicu-dev \ + libbz2-dev \ + libtbb2 \ + libtbb-dev \ + libdc1394-22-dev \ + wget \ + unzip \ + && rm -rf /var/lib/apt/lists/* + +# Set C++11 standard +ENV CXXFLAGS="-std=c++11" + +WORKDIR /tmp + +############################################# +# Clone repositories directly (avoiding submodule commit pinning) +############################################# +RUN echo "Cloning rain-rendering repository..." && \ + git clone https://github.com/astra-vision/rain-rendering.git /workspace && \ + echo "Cloning weather-particle-simulator directly to get latest binaries..." && \ + mkdir -p /workspace/3rdparty && \ + cd /workspace/3rdparty && \ + git clone https://github.com/astra-vision/weather-particle-simulator.git && \ + echo "Patching tools/simulation.py for compatibility..." && \ + cd /workspace && \ + sed -i "s/self.interact('Steps: What do you want to do \\\\?', menu)/self.interact('What do you want to do \\\\?', menu)/g" tools/simulation.py && \ + sed -i "s/'AHLSimulation'/'AHLSimulation_bionic'/g" tools/simulation.py && \ + sed -i "s/self.child.expect('Steps: What do you want to do \\\\?')/self.child.expect('What do you want to do \\\\?')/g" tools/simulation.py && \ + echo "Patches applied successfully!" 
+ +############################################# +# Download and extract rain streak database +############################################# +RUN echo "Downloading Columbia Uni. rain streak database..." && \ + cd /workspace/3rdparty && \ + wget -O databases.zip https://cave.cs.columbia.edu/old/databases/rain_streak_db/databases.zip && \ + echo "Extracting to rainstreakdb..." && \ + mkdir -p rainstreakdb && \ + unzip -q databases.zip -d rainstreakdb && \ + rm databases.zip && \ + echo "Rain streak database installed successfully!" + +############################################# +# Install Boost 1.62.0 (following StackOverflow guide) +# Note: Excluding python libraries as they're not needed and cause build issues +############################################# +RUN echo "Installing Boost 1.62.0..." && \ + wget -O boost_1_62_0.tar.gz https://sourceforge.net/projects/boost/files/boost/1.62.0/boost_1_62_0.tar.gz/download && \ + tar xzf boost_1_62_0.tar.gz && \ + cd boost_1_62_0 && \ + ./bootstrap.sh --prefix=/usr/local && \ + ./b2 --without-python -j $(nproc) install && \ + cd .. && \ + rm -rf boost_1_62_0 boost_1_62_0.tar.gz && \ + echo "/usr/local/lib" >> /etc/ld.so.conf.d/local.conf && \ + ldconfig + +############################################# +# Fix videodev.h for OpenCV (create symlink for V4L) +############################################# +RUN cd /usr/include/linux && \ + ln -s ../libv4l1-videodev.h videodev.h + +############################################# +# Install OpenCV 3.2.0 from source (following linuxize guide EXACTLY) +############################################# +RUN echo "Installing OpenCV 3.2.0..." && \ + mkdir ~/opencv_build && cd ~/opencv_build && \ + git clone https://github.com/opencv/opencv.git && \ + git clone https://github.com/opencv/opencv_contrib.git && \ + cd opencv && git checkout 3.2.0 && cd .. && \ + cd opencv_contrib && git checkout 3.2.0 && cd .. && \ + cd ~/opencv_build/opencv && \ + echo "Applying ffmpeg compatibility patch..." 
&& \ + sed -i '1i #define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22)\n#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER\n#define AVFMT_RAWPICTURE 0x0020' modules/videoio/src/cap_ffmpeg_impl.hpp && \ + mkdir build && cd build && \ + cmake -D CMAKE_BUILD_TYPE=RELEASE \ + -D CMAKE_INSTALL_PREFIX=/usr/local \ + -D INSTALL_C_EXAMPLES=OFF \ + -D INSTALL_PYTHON_EXAMPLES=OFF \ + -D OPENCV_GENERATE_PKGCONFIG=ON \ + -D OPENCV_EXTRA_MODULES_PATH=~/opencv_build/opencv_contrib/modules \ + -D BUILD_EXAMPLES=OFF .. && \ + (make -j$(nproc) || (echo "Build failed! Saving logs..."; cp CMakeFiles/CMakeOutput.log /tmp/opencv-cmake-output.log 2>/dev/null; cp CMakeFiles/CMakeError.log /tmp/opencv-cmake-error.log 2>/dev/null; echo "=== CMake Error Log ===" && cat /tmp/opencv-cmake-error.log 2>/dev/null; exit 1)) && \ + make install && \ + echo "Build successful! Saving logs for reference..." && \ + cp CMakeFiles/CMakeOutput.log /tmp/opencv-cmake-output.log 2>/dev/null && \ + cp CMakeFiles/CMakeError.log /tmp/opencv-cmake-error.log 2>/dev/null && \ + touch /tmp/opencv-cmake-output.log /tmp/opencv-cmake-error.log && \ + cd && \ + rm -rf ~/opencv_build && \ + ldconfig + +############################################# +# Install OpenSceneGraph 3.4.1 +############################################# +RUN echo "Installing OpenSceneGraph dependencies..." && \ + apt-get update && apt-get install -y \ + libx11-dev \ + libxrandr-dev \ + libglu1-mesa-dev \ + libfreetype6-dev \ + libopenthreads-dev \ + libcurl4-openssl-dev \ + && rm -rf /var/lib/apt/lists/* + +RUN echo "Installing OpenSceneGraph 3.4.1..." && \ + wget -q -O osg.zip https://github.com/openscenegraph/OpenSceneGraph/archive/OpenSceneGraph-3.4.1.zip && \ + unzip -q osg.zip && \ + cd OpenSceneGraph-OpenSceneGraph-3.4.1 && \ + mkdir build && \ + cd build && \ + cmake -D CMAKE_BUILD_TYPE=Release \ + -D CMAKE_INSTALL_PREFIX=/usr/local \ + -D CMAKE_CXX_FLAGS="-std=c++11" \ + .. && \ + make -j$(nproc) && \ + make install && \ + cd ../.. 
&& \ + rm -rf OpenSceneGraph-OpenSceneGraph-3.4.1 osg.zip && \ + echo "/usr/local/lib64" >> /etc/ld.so.conf.d/local.conf && \ + ldconfig + +############################################# +# Verify weather-particle-simulator binary works (using bionic version) +############################################# +RUN echo "Verifying AHLSimulation_bionic binary..." && \ + cd /workspace/3rdparty/weather-particle-simulator/lin_x64 && \ + echo "Checking if binary exists..." && \ + ls -lah AHLSimulation_bionic && \ + echo "Making binary executable..." && \ + chmod +x AHLSimulation_bionic && \ + echo "Creating symlink for convenience..." && \ + ln -sf AHLSimulation_bionic AHLSimulation && \ + echo "Checking library dependencies..." && \ + ldd AHLSimulation_bionic && \ + echo "Binary verification complete!" + +############################################# +# Install Python dependencies for rain-rendering project +############################################# +RUN pip3 install --upgrade pip setuptools wheel && \ + pip3 install --no-cache-dir \ + numpy \ + matplotlib \ + tqdm \ + imageio \ + pillow \ + natsort \ + glob2 \ + scipy \ + scikit-learn \ + scikit-image \ + pexpect \ + pyclipper \ + imutils + +############################################# +# Set up environment variables +############################################# +ENV LD_LIBRARY_PATH=/usr/local/lib:/usr/local/lib64:${LD_LIBRARY_PATH:-} +ENV PATH=/usr/local/bin:${PATH:-} + +############################################# +# Copy OpenCV build logs for reference +############################################# +RUN mkdir -p /var/log/opencv-build && \ + cp /tmp/opencv-cmake-output.log /var/log/opencv-build/ 2>/dev/null || true && \ + cp /tmp/opencv-cmake-error.log /var/log/opencv-build/ 2>/dev/null || true + +############################################# +# Final verification (commented out - already verified after OSG installation) +############################################# +#RUN echo "Final verification..." 
&& \ +# echo "Checking libraries..." && \ +# ldconfig -p | grep -E "boost|opencv|osg" || true && \ +# echo "Python version:" && \ +# python3 --version && \ +# echo "Verifying AHLSimulation dependencies..." && \ +# cd /workspace/3rdparty/weather-particle-simulator/lin_x64 && \ +# ldd AHLSimulation && \ +# if ldd AHLSimulation | grep -q "not found"; then \ +# echo "ERROR: Missing dependencies for AHLSimulation!"; \ +# exit 1; \ +# fi && \ +# echo "All dependencies satisfied! Installation complete." + +# Set working directory +WORKDIR /workspace + +# Default command +CMD ["/bin/bash"] diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..b6b7fc1 --- /dev/null +++ b/Makefile @@ -0,0 +1,158 @@ +# Makefile for Rain Rendering Docker Container +# Provides convenient commands for building and running the container + +.PHONY: help build build-no-cache run shell verify clean test + +# Image name +IMAGE_NAME = rain-rendering +IMAGE_TAG = latest + +# Colors for output +BLUE = \033[0;34m +GREEN = \033[0;32m +RED = \033[0;31m +NC = \033[0m # No Color + +help: + @echo "$(BLUE)Rain Rendering - Docker Commands$(NC)" + @echo "" + @echo "Available commands:" + @echo " $(GREEN)make build$(NC) - Build the Docker image" + @echo " $(GREEN)make build-no-cache$(NC) - Build the Docker image without cache" + @echo " $(GREEN)make run$(NC) - Run the main_threaded.py script" + @echo " $(GREEN)make shell$(NC) - Start an interactive shell in the container" + @echo " $(GREEN)make verify$(NC) - Verify all dependencies are correctly installed" + @echo " $(GREEN)make test-simulator$(NC) - Test the weather particle simulator binary" + @echo " $(GREEN)make clean$(NC) - Remove the Docker image" + @echo " $(GREEN)make clean-all$(NC) - Remove all Docker images and containers" + @echo "" + @echo "Docker Compose commands:" + @echo " $(GREEN)make compose-build$(NC) - Build using docker-compose" + @echo " $(GREEN)make compose-up$(NC) - Start services using docker-compose" + @echo " $(GREEN)make 
compose-down$(NC) - Stop services using docker-compose" + @echo "" + +build: + @echo "$(BLUE)Building Docker image...$(NC)" + @echo "This may take 20-30 minutes on first build." + docker build -t $(IMAGE_NAME):$(IMAGE_TAG) . + @echo "$(GREEN)Build complete!$(NC)" + +build-no-cache: + @echo "$(BLUE)Building Docker image without cache...$(NC)" + docker build --no-cache -t $(IMAGE_NAME):$(IMAGE_TAG) . + @echo "$(GREEN)Build complete!$(NC)" + +run: + @echo "$(BLUE)Running main_threaded.py...$(NC)" + docker run -it --rm \ + -v $(PWD):/workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + python3 main_threaded.py + +shell: + @echo "$(BLUE)Starting interactive shell...$(NC)" + docker run -it --rm \ + -v $(PWD):/workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + /bin/bash + +verify: + @echo "$(BLUE)Running verification script...$(NC)" + docker run -it --rm \ + -v $(PWD):/workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + /workspace/scripts/verify_docker.sh + +test-simulator: + @echo "$(BLUE)Testing weather particle simulator...$(NC)" + @echo "The simulator will start. Type '0' and press Enter to exit." 
+ docker run -it --rm \ + -v $(PWD):/workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + /workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation + +clean: + @echo "$(RED)Removing Docker image...$(NC)" + docker rmi $(IMAGE_NAME):$(IMAGE_TAG) + @echo "$(GREEN)Image removed!$(NC)" + +clean-all: + @echo "$(RED)Removing all Docker images and containers...$(NC)" + docker system prune -a + @echo "$(GREEN)Cleanup complete!$(NC)" + +# Docker Compose commands +compose-build: + @echo "$(BLUE)Building with docker-compose...$(NC)" + docker-compose build + @echo "$(GREEN)Build complete!$(NC)" + +compose-up: + @echo "$(BLUE)Starting services with docker-compose...$(NC)" + docker-compose up -d + @echo "$(GREEN)Services started!$(NC)" + +compose-down: + @echo "$(BLUE)Stopping services...$(NC)" + docker-compose down + @echo "$(GREEN)Services stopped!$(NC)" + +# X11 forwarding helpers (Linux only) +x11-enable: + @echo "$(BLUE)Enabling X11 forwarding...$(NC)" + xhost +local:docker + @echo "$(GREEN)X11 forwarding enabled!$(NC)" + +x11-disable: + @echo "$(BLUE)Disabling X11 forwarding...$(NC)" + xhost -local:docker + @echo "$(GREEN)X11 forwarding disabled!$(NC)" + +shell-x11: + @echo "$(BLUE)Starting shell with X11 forwarding...$(NC)" + docker run -it --rm \ + -e DISPLAY=$(DISPLAY) \ + -v /tmp/.X11-unix:/tmp/.X11-unix:rw \ + -v $(PWD):/workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + /bin/bash + +# Development commands +dev-shell: + @echo "$(BLUE)Starting development shell with mounted workspace...$(NC)" + docker run -it --rm \ + -v $(PWD):/workspace \ + -w /workspace \ + $(IMAGE_NAME):$(IMAGE_TAG) \ + /bin/bash + +# Check if Docker is installed +check-docker: + @which docker > /dev/null || (echo "$(RED)Docker is not installed!$(NC)" && exit 1) + @echo "$(GREEN)Docker is installed$(NC)" + @docker --version + +# Check if Docker Compose is installed +check-compose: + @which docker-compose > /dev/null || (echo "$(RED)Docker Compose is not installed!$(NC)" && exit 1) + @echo 
"$(GREEN)Docker Compose is installed$(NC)" + @docker-compose --version + +# Show Docker image info +info: + @echo "$(BLUE)Docker Image Information$(NC)" + @echo "Name: $(IMAGE_NAME):$(IMAGE_TAG)" + @docker images $(IMAGE_NAME):$(IMAGE_TAG) || echo "$(RED)Image not built yet. Run 'make build' first.$(NC)" + +# Show running containers +ps: + @echo "$(BLUE)Running Containers$(NC)" + @docker ps -a | grep $(IMAGE_NAME) || echo "No containers running" + +# Install dependencies (build the image) +install: build verify + @echo "$(GREEN)Installation complete!$(NC)" + +# Default target +.DEFAULT_GOAL := help diff --git a/QUICKSTART_DOCKER.md b/QUICKSTART_DOCKER.md new file mode 100644 index 0000000..6f59160 --- /dev/null +++ b/QUICKSTART_DOCKER.md @@ -0,0 +1,247 @@ +# Docker Quick Start Guide + +This guide will get you up and running with the Weather Particle Simulator Docker container in minutes. + +## Prerequisites + +- Docker installed ([Get Docker](https://docs.docker.com/get-docker/)) +- At least 10GB free disk space +- 4GB+ RAM recommended + +## Quick Start (3 steps) + +### 1. Build the Container + +```bash +# Option A: Using Makefile (recommended) +make build + +# Option B: Using Docker directly +docker build -t weather-particle-simulator:latest . + +# Option C: Using Docker Compose +docker-compose build +``` + +**Note**: First build takes 20-30 minutes as it compiles OpenCV, Boost, and OpenSceneGraph from source. + +### 2. Verify Installation + +```bash +# Using Makefile +make verify + +# Using Docker directly +docker run -it --rm -v $(pwd):/workspace weather-particle-simulator:latest /workspace/scripts/verify_docker.sh +``` + +You should see output indicating all dependencies are correctly installed: +``` +Tests passed: 15 +Tests failed: 0 +All tests passed! Container is ready to use. +``` + +### 3. 
Run the Simulator + +```bash +# Test the weather particle simulator binary +make test-simulator + +# Or run your Python scripts +make run + +# Or get an interactive shell +make shell +``` + +## Common Use Cases + +### Interactive Development + +```bash +# Start a shell with your code mounted +make shell + +# Inside the container, you can: +cd /workspace +python3 main_threaded.py +cd 3rdparty/weather-particle-simulator/lin_x64 +./AHLSimulation +``` + +### Running Python Scripts + +```bash +# Run the main script +docker run -it --rm \ + -v $(pwd):/workspace \ + weather-particle-simulator:latest \ + python3 main.py + +# Or use the Makefile shortcut +make run +``` + +### With GUI Support (Linux only) + +```bash +# Enable X11 forwarding +make x11-enable + +# Run with GUI +make shell-x11 + +# Inside the container +cd 3rdparty/weather-particle-simulator/lin_x64 +./AHLSimulation + +# When done, disable X11 +make x11-disable +``` + +## Makefile Commands + +The included Makefile provides convenient shortcuts: + +```bash +make help # Show all available commands +make build # Build the Docker image +make build-no-cache # Clean build without cache +make shell # Interactive bash shell +make run # Run main_threaded.py +make verify # Verify installation +make test-simulator # Test the simulator binary +make clean # Remove Docker image +make info # Show image information +make ps # Show running containers +``` + +## Docker Compose + +If you prefer docker-compose: + +```bash +# Build +docker-compose build + +# Run interactive shell +docker-compose run --rm weather-simulator + +# Run the main script +docker-compose run --rm rain-rendering +``` + +## Troubleshooting + +### Build fails with download errors + +```bash +# Check internet connection and retry with no cache +make build-no-cache +``` + +### "Permission denied" when running scripts + +```bash +# Make sure scripts are executable +chmod +x scripts/*.sh +``` + +### Container can't find project files + +```bash +# Make sure 
you're running from the project root directory +pwd # Should show: .../rain-rendering + +# Verify volume mount is working +docker run -it --rm -v $(pwd):/workspace weather-particle-simulator:latest ls -la /workspace +``` + +### Binary missing libraries + +```bash +# Check library dependencies +docker run -it --rm -v $(pwd):/workspace weather-particle-simulator:latest \ + ldd /workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation +``` + +## Next Steps + +- Read [DOCKER.md](DOCKER.md) for detailed documentation +- Check [README.md](README.md) for project documentation +- Review [docker-compose.yml](docker-compose.yml) for service configuration + +## Architecture + +The Dockerfile uses a multi-stage build: + +1. **Builder Stage**: Compiles dependencies from source + - Boost 1.62.0 + - OpenCV 3.2.0 + - OpenSceneGraph 3.4.1 + +2. **Runtime Stage**: Copies only needed libraries and binaries + - Smaller final image (~2-3GB vs ~8GB) + - Faster container startup + +## Example Workflow + +Here's a complete workflow example: + +```bash +# 1. Clone repository and submodules +git clone +cd rain-rendering +git submodule update --init --recursive + +# 2. Build container (one-time, ~30 minutes) +make build + +# 3. Verify everything works +make verify + +# 4. Test the simulator +make test-simulator +# Type '0' and Enter to exit + +# 5. Run your Python code +make shell +# Inside container: +python3 main_threaded.py + +# 6. Access results on host +# (Results are saved in ./data or ./output, visible on your host) +``` + +## Performance Tips + +- **First build**: Use `make build` and let it run. Go grab coffee! +- **Rebuilding**: If you only change Python code, you don't need to rebuild +- **Cache**: Docker caches each layer. 
Builds after the first are much faster +- **Resources**: Allocate more CPU/RAM to Docker if builds are slow + +## Getting Help + +- `make help` - Show all Makefile commands +- Check [DOCKER.md](DOCKER.md) for detailed documentation +- Verify installation with `make verify` +- Check container logs: `docker logs ` + +## Minimum System Requirements + +- **OS**: Linux, macOS, or Windows with WSL2 +- **RAM**: 4GB minimum, 8GB recommended +- **Disk**: 10GB free space +- **CPU**: Multi-core recommended (for faster builds) + +## Supported Platforms + +- ✅ Ubuntu 18.04+ +- ✅ Debian 10+ +- ✅ macOS (Intel and Apple Silicon) +- ✅ Windows 10/11 with WSL2 + +--- + +**Ready to go?** Start with `make build` and you'll be running simulations in 30 minutes! diff --git a/config/slamperturbationlab_image_2.py b/config/slamperturbationlab_image_2.py new file mode 100644 index 0000000..0608db0 --- /dev/null +++ b/config/slamperturbationlab_image_2.py @@ -0,0 +1,50 @@ +"""Dynamic config for SLAMPerturbationLab KITTI rain rendering - image_2.""" +import os + +def resolve_paths(params): + # Use symlink path (relative to rain-rendering directory) + # Symlink at data/source/slamperturbationlab -> actual dataset + dataset_root = "data/source/slamperturbationlab" + sequence = "04" + + params.sequences = [sequence] + + # Set paths for specific camera + params.images = {sequence: os.path.join(dataset_root, "image_2")} + params.depth = {sequence: os.path.join(dataset_root, "image_2_depth")} + + # Use calib file if exists + calib_file = os.path.join(dataset_root, "calib.txt") + if os.path.exists(calib_file): + params.calib = {sequence: calib_file} + else: + params.calib = {sequence: None} + + return params + +def settings(): + settings = {} + + # Camera intrinsic parameters (KITTI-like defaults) + settings["cam_hz"] = 10 + settings["cam_CCD_WH"] = [1242, 375] + settings["cam_CCD_pixsize"] = 4.65 + settings["cam_WH"] = [1242, 375] + settings["cam_focal"] = 6 + settings["cam_gain"] = 20 + 
settings["cam_f_number"] = 6.0 + settings["cam_focus_plane"] = 6.0 + settings["cam_exposure"] = 2 + + # Camera extrinsic parameters + settings["cam_pos"] = [1.5, 1.5, 0.3] + settings["cam_lookat"] = [1.5, 1.5, -1.] + settings["cam_up"] = [0., 1., 0.] + + # Sequence-wise settings + settings["sequences"] = {} + settings["sequences"]["04"] = {} + settings["sequences"]["04"]["sim_mode"] = "normal" + settings["sequences"]["04"]["sim_duration"] = 100 # Long enough for sequence + + return settings diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..62384ef --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,48 @@ +version: '3.8' + +services: + rain-rendering: + build: + context: . + dockerfile: Dockerfile + image: rain-rendering:latest + container_name: rain-rendering + volumes: + # Mount current directory to /workspace + - .:/workspace + # Mount directories (like SLAMPerturbationLab) + - ./datasets:/datasets + - ./results:/results + - ./output:/output + - ./configs:/configs + - ./data:/data + # For X11 forwarding on Linux + - /tmp/.X11-unix:/tmp/.X11-unix:rw + working_dir: /workspace + environment: + - DISPLAY=${DISPLAY} + # Keep container running + stdin_open: true + tty: true + command: /bin/bash + + # Service for running the main script + rain-rendering-runner: + build: + context: . 
+ dockerfile: Dockerfile + image: rain-rendering:latest + container_name: rain-rendering-runner + volumes: + - .:/workspace + - ./datasets:/datasets + - ./results:/results + - ./output:/output + - ./configs:/configs + - ./data:/data + working_dir: /workspace + environment: + - DISPLAY=${DISPLAY} + stdin_open: true + tty: true + command: python3 main_threaded.py diff --git a/main_threaded.py b/main_threaded.py index 644e32a..8fd8151 100644 --- a/main_threaded.py +++ b/main_threaded.py @@ -184,17 +184,17 @@ def check_arg(args): # Wait for an available thread print("Wait for threads") - while np.sum([t[2].isAlive() for t in threads]) >= max_thread: + while np.sum([t[2].is_alive() for t in threads]) >= max_thread: time.sleep(2) - thread_ended_mask = np.array([not t[2].isAlive() and t[2]._started.is_set() for t in threads]) + thread_ended_mask = np.array([not t[2].is_alive() and t[2]._started.is_set() for t in threads]) for t in threads[thread_ended_mask]: print("\nThread ended: ", t[2].toString()) threads = threads[~thread_ended_mask] # Wait for all threads if no remaining ones if np.sum(np.array([not t[2]._started.is_set() for t in threads])) == 0: - while np.sum([t[2].isAlive() for t in threads]) != 0: + while np.sum([t[2].is_alive() for t in threads]) != 0: time.sleep(2) print("All threads completed") diff --git a/scripts/verify_docker.sh b/scripts/verify_docker.sh new file mode 100755 index 0000000..6389e85 --- /dev/null +++ b/scripts/verify_docker.sh @@ -0,0 +1,185 @@ +#!/bin/bash +# Verification script for Docker container +# This script tests that all dependencies are correctly installed + +echo "==========================================" +echo "Docker Container Verification Script" +echo "==========================================" +echo "" + +# Color codes for output +GREEN='\033[0;32m' +RED='\033[0;31m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Test counter +TESTS_PASSED=0 +TESTS_FAILED=0 + +# Function to print test result +print_result() { + if [ 
$1 -eq 0 ]; then + echo -e "${GREEN}[PASS]${NC} $2" + ((TESTS_PASSED++)) + else + echo -e "${RED}[FAIL]${NC} $2" + ((TESTS_FAILED++)) + fi +} + +# Function to print info +print_info() { + echo -e "${YELLOW}[INFO]${NC} $1" +} + +echo "1. Checking system information..." +print_info "OS: $(cat /etc/os-release | grep PRETTY_NAME | cut -d'=' -f2 | tr -d '\"')" +print_info "Kernel: $(uname -r)" +print_info "Architecture: $(uname -m)" +echo "" + +echo "2. Checking C++ compiler..." +if command -v g++ &> /dev/null; then + VERSION=$(g++ --version | head -n1) + print_result 0 "g++ found: $VERSION" + + # Check C++11 support + echo "int main() { return 0; }" | g++ -std=c++11 -x c++ - -o /tmp/test 2>/dev/null + print_result $? "C++11 support verified" + rm -f /tmp/test +else + print_result 1 "g++ not found" +fi +echo "" + +echo "3. Checking Boost 1.62.0..." +if ldconfig -p | grep -q "libboost"; then + BOOST_VERSION=$(cat /usr/local/include/boost/version.hpp 2>/dev/null | grep "BOOST_LIB_VERSION" | head -n1 || echo "Version file not found") + print_result 0 "Boost libraries found" + print_info "Version info: $BOOST_VERSION" + + # List some key boost libraries + print_info "Available libraries:" + ldconfig -p | grep "libboost" | head -n 5 | awk '{print " - " $1}' +else + print_result 1 "Boost libraries not found" +fi +echo "" + +echo "4. Checking OpenCV 3.2.0..." 
+if ldconfig -p | grep -q "libopencv_core"; then + print_result 0 "OpenCV libraries found" + + # Try to get version + if command -v pkg-config &> /dev/null; then + CV_VERSION=$(pkg-config --modversion opencv 2>/dev/null || echo "Unable to get version") + print_info "OpenCV version: $CV_VERSION" + fi + + # List some key opencv libraries + print_info "Available libraries:" + ldconfig -p | grep "libopencv" | head -n 5 | awk '{print " - " $1}' + + # Test Python OpenCV + if python3 -c "import cv2; print('OpenCV Python:', cv2.__version__)" 2>/dev/null; then + print_result 0 "OpenCV Python bindings work" + else + print_result 1 "OpenCV Python bindings not working" + fi +else + print_result 1 "OpenCV libraries not found" +fi +echo "" + +echo "5. Checking OpenSceneGraph 3.4.1..." +if ldconfig -p | grep -q "libosg"; then + print_result 0 "OpenSceneGraph libraries found" + + # List some key OSG libraries + print_info "Available libraries:" + ldconfig -p | grep "libosg" | head -n 5 | awk '{print " - " $1}' + + # Check for osgviewer + if command -v osgviewer &> /dev/null; then + OSG_VERSION=$(osgviewer --version 2>&1 | head -n1 || echo "Unable to get version") + print_result 0 "osgviewer found" + print_info "$OSG_VERSION" + fi +else + print_result 1 "OpenSceneGraph libraries not found" +fi +echo "" + +echo "6. Checking Python environment..." +if command -v python3 &> /dev/null; then + PY_VERSION=$(python3 --version) + print_result 0 "Python3 found: $PY_VERSION" + + # Check required packages + for package in numpy pexpect PIL cv2; do + if python3 -c "import $package" 2>/dev/null; then + print_result 0 "Python package '$package' installed" + else + print_result 1 "Python package '$package' NOT installed" + fi + done +else + print_result 1 "Python3 not found" +fi +echo "" + +echo "7. Checking Weather Particle Simulator binary..." 
+if [ -f "/workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation" ]; then + print_result 0 "AHLSimulation binary found" + + # Check if executable + if [ -x "/workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation" ]; then + print_result 0 "AHLSimulation is executable" + else + print_result 1 "AHLSimulation is not executable" + fi + + # Check library dependencies + print_info "Checking binary dependencies..." + if ldd /workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation | grep -q "not found"; then + print_result 1 "Some library dependencies are missing:" + ldd /workspace/3rdparty/weather-particle-simulator/lin_x64/AHLSimulation | grep "not found" + else + print_result 0 "All binary dependencies satisfied" + fi +else + print_result 1 "AHLSimulation binary not found" + print_info "Make sure the project files are mounted to /workspace" +fi +echo "" + +echo "==========================================" +echo "Verification Summary" +echo "==========================================" +echo -e "Tests passed: ${GREEN}$TESTS_PASSED${NC}" +echo -e "Tests failed: ${RED}$TESTS_FAILED${NC}" +echo "" + +echo "==========================================" +echo "Build Logs Location" +echo "==========================================" +if [ -f "/var/log/opencv-build/opencv-cmake-output.log" ]; then + echo -e "${YELLOW}[INFO]${NC} OpenCV CMake output log: /var/log/opencv-build/opencv-cmake-output.log" +fi +if [ -f "/var/log/opencv-build/opencv-cmake-error.log" ]; then + echo -e "${YELLOW}[INFO]${NC} OpenCV CMake error log: /var/log/opencv-build/opencv-cmake-error.log" + if [ -s "/var/log/opencv-build/opencv-cmake-error.log" ]; then + echo -e "${YELLOW}[WARNING]${NC} Error log is not empty - there may have been build issues" + fi +fi +echo "" + +if [ $TESTS_FAILED -eq 0 ]; then + echo -e "${GREEN}All tests passed! Container is ready to use.${NC}" + exit 0 +else + echo -e "${RED}Some tests failed. 
Please check the errors above.${NC}" + echo -e "${YELLOW}[TIP]${NC} Check build logs at /var/log/opencv-build/ for more details" + exit 1 +fi diff --git a/tools/particles_simulation.py b/tools/particles_simulation.py index 82be8e8..5a97f40 100644 --- a/tools/particles_simulation.py +++ b/tools/particles_simulation.py @@ -26,7 +26,7 @@ def process(sim, force_recompute=False): def print_progress(): global _sim_status - threads_active = [t for t in threads if t._started.is_set() and t.isAlive()] + threads_active = [t for t in threads if t._started.is_set() and t.is_alive()] status = " | ".join(["#{id}: {time:.2f}/{dur:.2f}s".format(id=t.id, time=t.simtime, dur=t.simdur) for t in threads_active]) if status == _sim_status: return @@ -55,18 +55,18 @@ def print_progress(): # Wait for an available thread print("Wait for threads") - while np.sum([t.isAlive() for t in threads]) >= max_thread: + while np.sum([t.is_alive() for t in threads]) >= max_thread: time.sleep(2) print_progress() - thread_ended_mask = np.array([not t.isAlive() and t._started.is_set() for t in threads]) + thread_ended_mask = np.array([not t.is_alive() and t._started.is_set() for t in threads]) for t in threads[thread_ended_mask]: print("Thread ended: ", t.output_dir) threads = threads[~thread_ended_mask] # Wait for all threads if no remaining ones if np.sum(np.array([not t._started.is_set() for t in threads])) == 0: - while np.sum([t.isAlive() for t in threads]) != 0: + while np.sum([t.is_alive() for t in threads]) != 0: time.sleep(2) print_progress() diff --git a/tools/simulation.py b/tools/simulation.py index bc89474..d2a103e 100644 --- a/tools/simulation.py +++ b/tools/simulation.py @@ -75,7 +75,7 @@ def interact(self, wait_for, send_str): self.child.sendline(send_str.encode('ascii')) def interact_step_menu(self, menu): - self.interact('Steps: What do you want to do \?', menu) + self.interact('What do you want to do \?', menu) def set_sim_steps_times(self, start, dur, last): 
self.interact_step_menu('2') @@ -283,7 +283,17 @@ def run(self): log_path = os.path.join(self.output_dir, 'automate_log.txt') log_fp = open(log_path, 'a+') # self._print(self.output_dir) - self.child = PopenSpawn(os.path.join(self.bin_folder, 'AHLSimulation'), cwd=self.output_dir, logfile=logwriter(log_fp)) + + # Set LD_LIBRARY_PATH for Boost 1.62.0 and other libraries + env = os.environ.copy() + home = os.path.expanduser('~') + boost_lib_home = os.path.join(home, 'boost_1_62_0', 'lib') + boost_lib_local = os.path.join(os.getcwd(), '3rdparty', 'weather-particle-simulator', 'boost_1_62_0', 'stage', 'lib') + osg_lib = os.path.join(os.getcwd(), '3rdparty', 'osg', 'build', 'lib') + existing_ld_path = env.get('LD_LIBRARY_PATH', '') + env['LD_LIBRARY_PATH'] = f"{boost_lib_home}:{boost_lib_local}:{osg_lib}:/usr/local/lib:/usr/local/lib64:{existing_ld_path}" + + self.child = PopenSpawn(os.path.join(self.bin_folder, 'AHLSimulation_bionic'), cwd=self.output_dir, logfile=logwriter(log_fp), env=env) try: self._print("In main menu") @@ -433,7 +443,7 @@ def run(self): self.child.sendline(b'\n') if _steps_menu: - self.child.expect('Steps: What do you want to do \?') + self.child.expect('What do you want to do \?') self._print("In Step menu") self._print("Going to main menu") self.child.sendline('0'.encode('ascii')) diff --git a/weather_dev_toolkit/LICENSE b/weather_dev_toolkit/LICENSE new file mode 100644 index 0000000..0f695c8 --- /dev/null +++ b/weather_dev_toolkit/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2019 Bourré Pierre + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The 
above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/weather_dev_toolkit/README.md b/weather_dev_toolkit/README.md
new file mode 100644
index 0000000..f1823e3
--- /dev/null
+++ b/weather_dev_toolkit/README.md
@@ -0,0 +1,48 @@
+# Weather Dev-Toolkit
+
+
+This tool allows easy download of the Weather Kitti and Weather Cityscapes datasets, generated with [Halder et al, 2019].
+
+## Usage
+Use the **weather_download-generate.py** script for seamless download of the Kitti/Cityscapes weather augmented dataset, as well as post-processed depth.
+Weather Kitti has 3 sets of data (object detection and 2 sequences) and Weather Cityscapes has 1 set (training set).
+
+To download all Weather Kitti + Weather Cityscapes:
+`weather_download-generate.py all --kitti_root %PATH% --cityscapes_root %PATH%`
+
+To download all Weather Kitti:
+`weather_download-generate.py kitti --kitti_root %PATH%`
+
+To download all Weather Cityscapes:
+`weather_download-generate.py cityscapes --cityscapes_root %PATH%`
+
+
+It is possible to select the weather conditions with `--weather rain` or `--weather fog`.
+Or select a specific sequence with `--sequence XXX`.
+
+### Pre-requisite
+The script requires the original Kitti/Cityscapes datasets prior to running it (download them from the original datasets' websites).
+You must preserve the original Kitti/Cityscapes file structure, and pass the root folder as the `--kitti_root` or `--cityscapes_root` parameter. 
+
+File structure expected in kitti_root:
+* data_object/training/image_2
+* raw_data/2011_09_26/2011_09_26_drive_0032_sync/image_02/data
+* raw_data/2011_09_26/2011_09_26_drive_0056_sync/image_02/data
+
+For Cityscapes, the file structure expected in cityscapes_root is:
+* leftImg8bit/train
+
+Note: the relative paths correspond to the `--sequence` parameter.
+
+## Citation
+```
+@inproceedings{halder2019physics,
+  title={Physics-Based Rendering for Improving Robustness to Rain},
+  author={Halder, Shirsendu Sukanta and Lalonde, Jean-Fran{\c{c}}ois and de Charette, Raoul},
+  booktitle={IEEE/CVF International Conference on Computer Vision},
+  year={2019}
+}
+```
+
+## Troubleshooting
+For any help, you may email Raoul de Charette (raoul.de-charette@inria.fr)
\ No newline at end of file
diff --git a/weather_dev_toolkit/cityscapes.py b/weather_dev_toolkit/cityscapes.py
new file mode 100644
index 0000000..c7a7232
--- /dev/null
+++ b/weather_dev_toolkit/cityscapes.py
@@ -0,0 +1,26 @@
+######################################################################################################################
+# Halder, S. S., Lalonde, J. F., & de Charette, R. (2019).
+# Physics-Based Rendering for Improving Robustness to Rain. 
IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +###################################################################################################################### + +import os +import cv2 + +from dataset import Dataset + + +class Cityscapes(Dataset): + sequences = ['leftImg8bit/train', 'leftImg8bit/val'] + data = {"*": ["depth", "fog_transmittance", "rain_diff"], "leftImg8bit/val": ["rain_diff"]} + + def __init__(self, original_dir, output_dir, sequences=None): + if sequences is None: + sequences = Cityscapes.sequences + + super().__init__("cityscapes", original_dir, output_dir, sequences, Cityscapes.data) + + def transform_original_image(self, img): + return cv2.resize(img, (1024, 512), cv2.INTER_CUBIC) # Downscale original image diff --git a/weather_dev_toolkit/dataset.py b/weather_dev_toolkit/dataset.py new file mode 100644 index 0000000..82f5b72 --- /dev/null +++ b/weather_dev_toolkit/dataset.py @@ -0,0 +1,239 @@ +###################################################################################################################### +# Halder, S. S., Lalonde, J. F., & de Charette, R. (2019). +# Physics-Based Rendering for Improving Robustness to Rain. 
IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +###################################################################################################################### + +import os +import urllib.request +import hashlib +import numpy as np +import cv2 +import glob +import shutil +from tqdm import tqdm +from zipfile import ZipFile + + +class Dataset: + HTTP_PATH = "https://www.rocq.inria.fr/rits_files/computer-vision/weather-augment/" + + def __init__(self, name, original_dir, output_dir, sequences, data): + self.original_name = name + self.name = "weather_"+self.original_name + self.original_dir = original_dir + self.output_dir = output_dir + self.sequences = sequences + self.data = data + self.checksums_link = Dataset.HTTP_PATH + "{}_checksums.txt".format(self.name) + self.downloaded_directory = os.path.join(output_dir, "downloaded") + self.datasets_directory = os.path.join(self.output_dir, "weather_datasets") + + self.links = [] + for sequence in self.sequences: + sequence_data = self.data[sequence] if sequence in self.data.keys() else self.data["*"] + sequence = sequence.replace("/", "_") + print(sequence_data) + for d in sequence_data: + self.links.append(Dataset.HTTP_PATH + "{}_{}_{}.zip".format(self.name, sequence, d)) + + print("Verifying {} integrity... {}".format(self.original_name, original_dir), end="") + for sequence in self.sequences: + if not os.path.isdir(original_dir): + raise NotADirectoryError("Original *{}* dataset directory doesn't exist: {}".format(self.original_name, original_dir)) + if not os.path.isdir(os.path.join(original_dir, sequence)): + raise NotADirectoryError("Original *{}* dataset file structure invalid. Please use correct file structure, directory %{}_root%/{} is missing. 
Or restrict sequences (--sequence).".format(self.original_name, self.original_name, sequence)) + print(" [OK]") + + + def _extract(self, archive, directory_to_extract): + with ZipFile(file=archive) as zip_file: + for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist()), desc=" Extracting: " + os.path.basename(archive)): + zip_file.extract(member=file, path=directory_to_extract) + + class DownloadProgressBar(tqdm): + def update_to(self, b=1, bsize=1, tsize=None): + if tsize is not None: + self.total = tsize + self.update(b * bsize - self.n) + + def _download_url(self, url, output_path): + with self.DownloadProgressBar(unit='B', unit_scale=True, + miniters=1, desc=" Downloading: " + url.split('/')[-1]) as t: + urllib.request.urlretrieve(url, filename=output_path, reporthook=t.update_to) + + def _sha256sum(self, filename, blocksize=65536): + hash_sha256 = hashlib.sha256() + with tqdm(total=os.stat(filename).st_size, desc=" Calculating checksum: " + os.path.basename(filename), unit='B', unit_scale=True, unit_divisor=1024) as pbar: + with open(filename, "rb") as f: + block = 1 + while block: + block = f.read(blocksize) + if block: + pbar.update(len(block)) + hash_sha256.update(block) + + return hash_sha256.hexdigest() + + def download_and_extract_all(self, auto_remove=True): + os.makedirs(self.downloaded_directory, exist_ok=True) + os.makedirs(self.datasets_directory, exist_ok=True) + + # Get Checksums + checksum_file_name = os.path.join(self.downloaded_directory, self.checksums_link.split('/')[-1]) + self._download_url(self.checksums_link, checksum_file_name) + + self.checksums = {} + with open(checksum_file_name, 'r') as f: + for line in f: + value, key = line.split() + self.checksums[key] = value + + for url_idx, url in enumerate(self.links): + print(" {}, download [{}/{}]".format(self.name, url_idx + 1, len(self.links))) + self.download_and_extract(url, auto_remove) + + if auto_remove: + os.remove(checksum_file_name) + if 
len(os.listdir(self.downloaded_directory)) == 0: + os.rmdir(self.downloaded_directory) + + def download_and_extract(self, url, auto_remove=True): + downloaded_file_name = url.split('/')[-1] + path_to_download = os.path.join(self.downloaded_directory, downloaded_file_name) + + download_needed = True + + # Redownload this file only if the checksum is invalid + if os.path.isfile(path_to_download): + sha256sum = self._sha256sum(path_to_download) + if downloaded_file_name in self.checksums and self.checksums[downloaded_file_name] == sha256sum: + print("\nUsing pre-downloaded file") + download_needed = False + else: + print("\nSha256sum is invalid: {}. Will re-download".format(downloaded_file_name)) + + if download_needed: + self._download_url(url, path_to_download) + + self._extract(path_to_download, self.datasets_directory) + + if auto_remove: + os.remove(path_to_download) + + def transform_original_image(self, img): + raise NotImplementedError + + def generate_fog(self): + for seq_idx, sequence in enumerate(self.sequences): + sequence_data = self.data[sequence] if sequence in self.data.keys() else self.data["*"] + if "fog_transmittance" not in sequence_data: + print(sequence+", No fog data in this sequence.") + continue + + for_transmission_dir = os.path.join(self.datasets_directory, self.name, sequence, "fog_transmittance") + + if not os.path.isdir(for_transmission_dir): + raise NotADirectoryError("fog_transmittance folder doesn't exist") + + vmax_list = os.listdir(for_transmission_dir) + + fog_dir = os.path.join(self.datasets_directory, self.name, sequence, "fog") + + for vmax_idx, vmax in enumerate(vmax_list): + print(" {}, Sequence [{}/{}], Fog vmax {} [{}/{}]".format(self.name, seq_idx+1, len(self.sequences), vmax, vmax_idx+1, len(vmax_list))) + files = glob.glob(os.path.join(for_transmission_dir, vmax, "**/*.png"), recursive=True) + for fog_transmittance_path in tqdm(files, desc=" {}, Fog, {}, {}".format(self.name, vmax, sequence)): + relative_path_to_filename = 
fog_transmittance_path.replace(os.path.join(for_transmission_dir, vmax) + "/", "") + filename = os.path.basename(relative_path_to_filename) + sub_folders = relative_path_to_filename.replace(filename, "") + + original_file_path = os.path.join(self.original_dir, sequence, sub_folders, filename) + if not os.path.isfile(original_file_path): + print("File {} doesn't exist".format(original_file_path)) + continue + + img_clear = cv2.imread(original_file_path) + img_clear = self.transform_original_image(img_clear) + fog_output_dir = os.path.join(fog_dir, vmax, sub_folders) + + os.makedirs(fog_output_dir, exist_ok=True) + + fog_transmittance = cv2.imread(fog_transmittance_path, cv2.IMREAD_UNCHANGED) / 255. + + LInf = np.array([200, 200, 200]) # Atmosphere chromacity + + direct_trans_noise = img_clear * fog_transmittance + airlight_noise = LInf * (1 - fog_transmittance) + img_fog = direct_trans_noise + airlight_noise + img_fog = np.asarray(img_fog, dtype=np.uint8) + + cv2.imwrite(os.path.join(fog_output_dir, filename), img_fog) + + def generate_rain(self): + sequence_data = self.data["*"] + if "rain_diff" in sequence_data: + self.generate_rain_from_diff() + elif "rain" in sequence_data: + return # Nothing to do + else: + raise NotImplementedError + + def generate_rain_from_diff(self): + for seq_idx, sequence in enumerate(self.sequences): + rain_levels_dir = os.path.join(self.datasets_directory, self.name, sequence, "rain_diff") + + if not os.path.isdir(rain_levels_dir): + rain_levels_dir = os.path.join(self.datasets_directory, self.name, sequence, "rain") + if os.path.isdir(rain_levels_dir): + print(" Rain seems already generated in ({}). 
Skipping...".format(rain_levels_dir)) + continue + else: + raise NotADirectoryError("rain_diff folder doesn't exist") + + levels_list = os.listdir(rain_levels_dir) + + rain_dir = os.path.join(self.datasets_directory, self.name, sequence, "rain") + + for rain_idx, rain_level in enumerate(levels_list): + print(" {}, Sequence [{}/{}], Rain {} [{}/{}]".format(self.name, seq_idx+1, len(self.sequences), rain_level, rain_idx+1, len(levels_list))) + files = glob.glob(os.path.join(rain_levels_dir, rain_level, "rainy_image", "**/*.png"), recursive=True) + for rain_diff_path in tqdm(files, desc=" {}, Rain, {}, {}".format(self.name, rain_level, sequence)): + relative_path_to_filename = rain_diff_path.replace(os.path.join(rain_levels_dir, rain_level, "rainy_image") + "/", "") + + filename = os.path.basename(relative_path_to_filename) + sub_folders = relative_path_to_filename.replace(filename, "") + + rainy_image_output_dir = os.path.join(rain_dir, rain_level, "rainy_image", sub_folders) + rain_mask_output_dir = os.path.join(rain_dir, rain_level, "rain_mask", sub_folders) + + original_file_path = os.path.join(self.original_dir, sequence, sub_folders, filename) + + os.makedirs(rainy_image_output_dir, exist_ok=True) + + os.makedirs(rain_mask_output_dir, exist_ok=True) + + shutil.copyfile(os.path.join(rain_levels_dir, rain_level, "rain_mask", sub_folders, filename), + os.path.join(rain_mask_output_dir, filename)) + self._apply_diff(original_file_path, rain_diff_path, os.path.join(rainy_image_output_dir, filename)) + + for sequence in self.sequences: + rain_levels_dir = os.path.join(self.datasets_directory, self.name, sequence, "rain_diff") + + shutil.rmtree(rain_levels_dir, ignore_errors=True) + + # Apply differential image + def _apply_diff(self, raw_file, diff_file, output_file): + # Read images + clear_image = cv2.imread(raw_file) + diff_image = cv2.imread(diff_file, cv2.IMREAD_UNCHANGED) + clear_image = self.transform_original_image(clear_image) + + # Generate augmented image + 
clear_image = clear_image.astype(np.uint16) + diff_image = diff_image.astype(np.uint16) + augmented_image = (diff_image - 255).astype(np.int16) + clear_image + + cv2.imwrite(output_file, augmented_image.astype(np.uint8)) \ No newline at end of file diff --git a/weather_dev_toolkit/kitti.py b/weather_dev_toolkit/kitti.py new file mode 100644 index 0000000..8b62ccf --- /dev/null +++ b/weather_dev_toolkit/kitti.py @@ -0,0 +1,30 @@ +###################################################################################################################### +# Halder, S. S., Lalonde, J. F., & de Charette, R. (2019). +# Physics-Based Rendering for Improving Robustness to Rain. IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +###################################################################################################################### + +import os + +from dataset import Dataset + + +class Kitti(Dataset): + sequences = ['data_object/training/image_2', 'raw_data/2011_09_26/2011_09_26_drive_0032_sync/image_02/data', 'raw_data/2011_09_26/2011_09_26_drive_0056_sync/image_02/data'] + data = {"*": ["depth", "fog_transmittance", "rain"]} + + def __init__(self, original_dir, output_dir, sequences=None): + if sequences is None: + sequences = Kitti.sequences + + super().__init__("kitti", original_dir, output_dir, sequences, Kitti.data) + + def transform_original_image(self, img): + # Crop-center original image + cropx, cropy = 1216, 352 + y, x, _ = img.shape + startx = x // 2 - (cropx // 2) + starty = y // 2 - (cropy // 2) + return img[starty:starty + cropy, startx:startx + cropx] diff --git a/weather_dev_toolkit/options.py b/weather_dev_toolkit/options.py new file mode 100644 index 0000000..a07c150 --- /dev/null +++ b/weather_dev_toolkit/options.py @@ -0,0 +1,44 @@ +###################################################################################################################### +# Halder, S. 
S., Lalonde, J. F., & de Charette, R. (2019). +# Physics-Based Rendering for Improving Robustness to Rain. IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +###################################################################################################################### + +import os +from argparse import ArgumentParser +from kitti import Kitti +from cityscapes import Cityscapes + +file_dir = os.path.dirname(__file__) # the directory that options.py resides in + + +def parse(): + parser = ArgumentParser() + subparsers = parser.add_subparsers(dest="dataset") + subparsers.required = True + + # All + all_subparser = subparsers.add_parser('all', help='Actions for all datasets') + all_subparser.add_argument("--cityscapes_root", type=str, help="Original Cityscapes path", required=True) + all_subparser.add_argument("--kitti_root", type=str, help="Original Kitti path", required=True) + all_subparser.add_argument("--output_dir", type=str, default=os.path.join(file_dir, "../")) + all_subparser.add_argument("--weather", nargs='+', choices=['rain', 'fog'], default=['rain', 'fog']) + + # Cityscapes + cityscapes_subparser = subparsers.add_parser('cityscapes', help='Actions for Cityscapes') + cityscapes_subparser.add_argument("--cityscapes_root", type=str, help="Original cityscapes path", required=True) + cityscapes_subparser.add_argument("--output_dir", type=str, default=os.path.join(file_dir, "../")) + cityscapes_subparser.add_argument("--weather", nargs='+', choices=['rain', 'fog'], default=['rain', 'fog']) + cityscapes_subparser.add_argument("--sequence", nargs='+', choices=Cityscapes.sequences, default=Cityscapes.sequences) + + # Kitti + kitti_subparser = subparsers.add_parser('kitti', help='Actions for Kitti') + kitti_subparser.add_argument("--kitti_root", type=str, help="Original Kitti path", required=True) + kitti_subparser.add_argument("--output_dir", type=str, default=os.path.join(file_dir, 
"../")) + kitti_subparser.add_argument("--weather", nargs='+', choices=['rain', 'fog'], default=['rain', 'fog']) + kitti_subparser.add_argument("--sequence", nargs='+', choices=Kitti.sequences, default=Kitti.sequences) + + + return parser.parse_args() diff --git a/weather_dev_toolkit/requirements.txt b/weather_dev_toolkit/requirements.txt new file mode 100644 index 0000000..7b32f69 --- /dev/null +++ b/weather_dev_toolkit/requirements.txt @@ -0,0 +1,3 @@ +opencv-python +numpy +tqdm diff --git a/weather_dev_toolkit/weather_download-generate.py b/weather_dev_toolkit/weather_download-generate.py new file mode 100644 index 0000000..d5f4c32 --- /dev/null +++ b/weather_dev_toolkit/weather_download-generate.py @@ -0,0 +1,54 @@ +###################################################################################################################### +# This file will download and generate foggy and rainy images of Kitti and Cityscapes datasets +# Usage: +# python weather_download-generate.py [all|kitti|cityscapes] --kitti_root %PATH% --cityscapes_root %PATH% +# +# Example: +# python weather_download-generate.py all --kitti_root /datasets/Kitti --cityscapes_root /datasets/cityscapes +# Or: +# python weather_download-generate.py kitti --kitti_root /datasets/Kitti +# Or: +# python weather_download-generate.py cityscapes --cityscapes_root /datasets/cityscapes +# +# Use python weather_download-generate.py [all|kitti|cityscapes] -h for more information +###################################################################################################################### +# Halder, S. S., Lalonde, J. F., & de Charette, R. (2019). +# Physics-Based Rendering for Improving Robustness to Rain. 
IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +# +# News 2022-08-01: Fixed a download bug and added cityscapes validation set (rain only) +###################################################################################################################### + +import options +from kitti import Kitti +from cityscapes import Cityscapes + + +def main(args): + if args.dataset == "cityscapes": + dataset_list = [Cityscapes(args.cityscapes_root, args.output_dir, args.sequence)] + elif args.dataset == "kitti": + dataset_list = [Kitti(args.kitti_root, args.output_dir, args.sequence)] + else: + dataset_list = [Cityscapes(args.cityscapes_root, args.output_dir), + Kitti(args.kitti_root, args.output_dir)] + + for i, dataset in enumerate(dataset_list): + print("Dataset {} [{}/{}]".format(dataset.name, i+1, len(dataset_list))) + dataset.download_and_extract_all() + + if 'rain' in args.weather: + print(" {}, Rain".format(dataset.name)) + dataset.generate_rain() + + if 'fog' in args.weather: + print(" {}, Fog".format(dataset.name)) + dataset.generate_fog() + + print("[DONE]") + print("Check folder: {}".format(dataset_list[0].datasets_directory)) + +if __name__ == '__main__': + main(options.parse()) diff --git a/weather_dev_toolkit/weather_generate.py b/weather_dev_toolkit/weather_generate.py new file mode 100644 index 0000000..07b2308 --- /dev/null +++ b/weather_dev_toolkit/weather_generate.py @@ -0,0 +1,49 @@ +###################################################################################################################### +# This file will generate foggy and rainy images of Kitti and Cityscapes datasets +# Usage: +# python weather_generate.py [all|kitti|cityscapes] --kitti_root %PATH% --cityscapes_root %PATH% +# +# Example: +# python weather_generate.py all --kitti_root /datasets/Kitti --cityscapes_root /datasets/cityscapes +# Or: +# python weather_generate.py kitti --kitti_root /datasets/Kitti +# Or: 
+# python weather_generate.py cityscapes --cityscapes_root /datasets/cityscapes +# +# Use python weather_generate.py [all|kitti|cityscapes] -h for more information +###################################################################################################################### +# Halder, S. S., Lalonde, J. F., & de Charette, R. (2019). +# Physics-Based Rendering for Improving Robustness to Rain. IEEE/CVF International Conference on Computer Vision +# +# From: Computer Vision Group, RITS team, Inria +# License: MIT +###################################################################################################################### + +import options +from kitti import Kitti +from cityscapes import Cityscapes + + +def main(args): + if args.dataset == "cityscapes": + dataset_list = [Cityscapes(args.cityscapes_root, args.output_dir, args.sequence)] + elif args.dataset == "kitti": + dataset_list = [Kitti(args.kitti_root, args.output_dir, args.sequence)] + else: + dataset_list = [Cityscapes(args.cityscapes_root, args.output_dir), + Kitti(args.kitti_root, args.output_dir)] + + for i, dataset in enumerate(dataset_list): + print("Dataset {} [{}/{}]".format(dataset.name, i+1, len(dataset_list))) + + if 'rain' in args.weather: + print(" {}, Rain".format(dataset.name)) + dataset.generate_rain() + + if 'fog' in args.weather: + print(" {}, Fog".format(dataset.name)) + dataset.generate_fog() + + +if __name__ == '__main__': + main(options.parse())