Shop integration
This commit is contained in:
10
product-scroll-poc/fragmentShader.glsl
Normal file
10
product-scroll-poc/fragmentShader.glsl
Normal file
@@ -0,0 +1,10 @@
|
||||
|
||||
// Fragment stage of the parallax plane: paints the photo texture at the
// interpolated UV. All of the depth/parallax work happens in the vertex
// stage; this pass is a plain texture lookup.
varying vec2 vUv;
// Written by the vertex shader; declared here to keep the varying
// interface matched, but not used by the color pass.
varying float vDisplacement;

uniform sampler2D tImage;

void main() {
    gl_FragColor = texture2D(tImage, vUv);
}
|
||||
@@ -89,8 +89,21 @@
|
||||
</div>
|
||||
</section>
|
||||
|
||||
<!-- Parallax Section -->
|
||||
<section class="parallax-section">
|
||||
<div class="parallax-sticky">
|
||||
<canvas id="parallax-canvas"></canvas>
|
||||
<div class="product-reveal">
|
||||
<img src="pottery-vase.png" alt="Finished Product" id="final-product-img">
|
||||
</div>
|
||||
</div>
|
||||
<div class="parallax-trigger"></div>
|
||||
</section>
|
||||
|
||||
</div>
|
||||
|
||||
<!-- Three.js from CDN -->
|
||||
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
|
||||
<script src="script.js"></script>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
BIN
product-scroll-poc/pottery-vase.png
Normal file
BIN
product-scroll-poc/pottery-vase.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 701 KiB |
@@ -27,7 +27,7 @@ video.addEventListener("loadedmetadata", () => {
|
||||
|
||||
async function startBuffering() {
|
||||
video.currentTime = 0;
|
||||
video.playbackRate = 1; // Standard speed for better capture quality
|
||||
video.playbackRate = 4.0; // Increased speed for faster loading
|
||||
await video.play();
|
||||
|
||||
function capture() {
|
||||
@@ -135,3 +135,151 @@ function initScrollAnimation() {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
// --- 3D Parallax Implementation ---

/**
 * Builds a 2.5D parallax scene on #parallax-canvas: a subdivided plane is
 * displaced per-vertex by a depth map, nudged by the mouse, and dollied in
 * by a GSAP ScrollTrigger timeline that finally crossfades to the product
 * image. Safe to call before the CDN scripts finish loading — it retries.
 */
async function initParallax() {
    // THREE and gsap arrive via CDN <script> tags; the original only guarded
    // THREE but used gsap unconditionally further down. Guard both, retrying
    // on the next frame until they exist.
    if (typeof THREE === 'undefined' || typeof gsap === 'undefined') {
        console.warn("Three.js/GSAP not loaded yet. Retrying...");
        requestAnimationFrame(initParallax);
        return;
    }

    const parallaxCanvas = document.querySelector("#parallax-canvas");
    if (!parallaxCanvas) return;

    // SCENE SETUP
    const scene = new THREE.Scene();
    const camera = new THREE.PerspectiveCamera(75, window.innerWidth / window.innerHeight, 0.1, 1000);
    const renderer = new THREE.WebGLRenderer({ canvas: parallaxCanvas, alpha: true, antialias: true });

    renderer.setSize(window.innerWidth, window.innerHeight);
    // Cap DPR at 2 to avoid huge framebuffers on high-density displays.
    renderer.setPixelRatio(Math.min(window.devicePixelRatio, 2));

    // TEXTURE LOADER
    const textureLoader = new THREE.TextureLoader();

    // TextureLoader.load takes (url, onLoad, onProgress, onError). The
    // original promises never rejected, so a missing/failed texture left
    // initialization hanging forever with no diagnostics.
    const loadTexture = (url) =>
        new Promise((resolve, reject) => textureLoader.load(url, resolve, undefined, reject));

    let originalTexture;
    let depthTexture;
    try {
        [originalTexture, depthTexture] = await Promise.all([
            loadTexture('workshop.jpg'),
            loadTexture('workshop_depth.png'),
        ]);
    } catch (err) {
        console.error("Parallax texture load failed:", err);
        return;
    }

    // GEOMETRY & MATERIAL
    // 128x128 segments so per-vertex depth displacement stays smooth.
    const geometry = new THREE.PlaneGeometry(16, 9, 128, 128);

    const material = new THREE.ShaderMaterial({
        uniforms: {
            tImage: { value: originalTexture },
            tDepth: { value: depthTexture },
            uDepthScale: { value: 3.0 }, // Exaggerated depth
            uMouse: { value: new THREE.Vector2(0, 0) },
            uScroll: { value: 0 } // reserved for scroll-driven shader work; currently unused
        },
        vertexShader: `
            varying vec2 vUv;
            varying float vDisplacement;

            uniform sampler2D tDepth;
            uniform float uDepthScale;
            uniform vec2 uMouse;

            void main() {
                vUv = uv;

                float depth = texture2D(tDepth, uv).r;
                vDisplacement = depth;

                vec3 newPosition = position;

                // Displace along Z
                newPosition.z += depth * uDepthScale;

                // Mouse Parallax (Simulate perspective shift)
                // Closer objects (light depth) move more than far objects
                newPosition.x += (uMouse.x * depth * 0.5);
                newPosition.y += (uMouse.y * depth * 0.5);

                gl_Position = projectionMatrix * modelViewMatrix * vec4(newPosition, 1.0);
            }
        `,
        fragmentShader: `
            varying vec2 vUv;
            uniform sampler2D tImage;

            void main() {
                gl_FragColor = texture2D(tImage, vUv);
            }
        `,
        side: THREE.DoubleSide
    });

    const mesh = new THREE.Mesh(geometry, material);
    scene.add(mesh);

    camera.position.z = 5;

    // MOUSE INTERACTION — normalize cursor to [-1, 1] on both axes.
    window.addEventListener("mousemove", (e) => {
        const x = (e.clientX / window.innerWidth) * 2 - 1;
        const y = -(e.clientY / window.innerHeight) * 2 + 1;

        // Tween the uniform rather than setting it directly for smoothing.
        gsap.to(material.uniforms.uMouse.value, {
            x: x * 0.5, // Sensitivity
            y: y * 0.5,
            duration: 1,
            ease: "power2.out"
        });
    });

    // RESIZE HANDLER — scale the 16:9 plane so it always covers the viewport.
    function handleResize() {
        const videoAspect = 16 / 9;
        const windowAspect = window.innerWidth / window.innerHeight;

        camera.aspect = windowAspect;
        camera.updateProjectionMatrix();
        renderer.setSize(window.innerWidth, window.innerHeight);

        // Cover logic: stretch along whichever axis would otherwise letterbox.
        if (windowAspect < videoAspect) {
            mesh.scale.set(videoAspect / windowAspect, 1, 1);
        } else {
            mesh.scale.set(1, windowAspect / videoAspect, 1);
        }
    }
    window.addEventListener('resize', handleResize);
    handleResize(); // Initial call

    // SCROLL ANIMATION (GSAP) — scrubbed dolly-in, then crossfade to the
    // final product image near the end of the scroll range.
    const tl = gsap.timeline({
        scrollTrigger: {
            trigger: ".parallax-section",
            start: "top top",
            end: "bottom bottom",
            scrub: true
        }
    });

    tl.to(camera.position, {
        z: 3.5,
        ease: "none"
    }, 0);

    // Fade out to reveal product
    tl.to(".product-reveal", { opacity: 1, duration: 0.2 }, 0.9);
    tl.to(parallaxCanvas, { opacity: 0, duration: 0.2 }, 0.95);

    function animate() {
        requestAnimationFrame(animate);
        renderer.render(scene, camera);
    }
    animate();
}

// Start
initParallax();
|
||||
|
||||
20
product-scroll-poc/vertexShader.glsl
Normal file
20
product-scroll-poc/vertexShader.glsl
Normal file
@@ -0,0 +1,20 @@
|
||||
|
||||
// Vertex stage of the parallax plane: pushes each vertex toward the camera
// in proportion to the depth-map sample at its UV, turning the flat photo
// into a 2.5D relief.
varying vec2 vUv;
varying float vDisplacement;

uniform sampler2D tDepth;
uniform float uDepthScale;

void main() {
    vUv = uv;

    // The depth map is greyscale, so the red channel carries the height.
    float displacement = texture2D(tDepth, uv).r;
    vDisplacement = displacement;

    // Offset along local Z, scaled by the configurable depth factor.
    vec3 displaced = position;
    displaced.z += displacement * uDepthScale;

    gl_Position = projectionMatrix * modelViewMatrix * vec4(displaced, 1.0);
}
|
||||
96
product-scroll-poc/video_remove_bg.py
Normal file
96
product-scroll-poc/video_remove_bg.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import cv2
|
||||
import numpy as np
|
||||
import os
|
||||
from rembg import remove
|
||||
from PIL import Image
|
||||
|
||||
def process_video(input_path, output_path):
    """Strip the background from every frame of a video using rembg.

    Frames are written with cv2.VideoWriter. Note that VideoWriter (opened
    with the default isColor=True) only accepts 3-channel BGR frames, so a
    real alpha channel cannot be carried through here; transparent regions
    are composited onto black instead.

    Args:
        input_path: Path to the source video file.
        output_path: Destination path; the extension selects the codec
            ('.mp4' -> mp4v, anything else -> VP9/WebM).
    """
    # Check if input exists
    if not os.path.exists(input_path):
        print(f"Error: Input file '{input_path}' not found.")
        return

    print(f"Processing video: {input_path}")

    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        print(f"Error: Could not open '{input_path}' for reading.")
        return

    # Get video properties
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 fps; fall back to a sane default so the
    # writer does not produce a broken file.
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

    print(f"Resolution: {width}x{height}, FPS: {fps}, Total Frames: {total_frames}")

    # Pick a codec from the output extension. Standard MP4 does not support
    # alpha; WebM/VP9 does in principle, but cv2.VideoWriter still only
    # takes BGR frames, so alpha is flattened below either way.
    output_ext = os.path.splitext(output_path)[1].lower()
    if output_ext == '.mp4':
        print("Warning: MP4 container often doesn't support alpha transparency widely. Switching codec might be needed.")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    else:
        fourcc = cv2.VideoWriter_fourcc(*'VP90')

    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))
    if not out.isOpened():
        print(f"Error: Could not open '{output_path}' for writing (codec unavailable?).")
        cap.release()
        return

    frame_count = 0

    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                break

            # Convert BGR (OpenCV) to RGB (PIL/rembg)
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            pil_im = Image.fromarray(frame_rgb)

            # Remove background using rembg (typically returns RGBA)
            output_np = np.array(remove(pil_im))

            if output_np.ndim == 3 and output_np.shape[2] == 4:
                # Fix: the original passed a 4-channel BGRA frame to
                # out.write(), which VideoWriter (isColor=True) rejects or
                # drops silently. Composite onto black via the alpha mask
                # so removed regions render as black, not leftover pixels.
                alpha = output_np[:, :, 3:4].astype(np.float32) / 255.0
                rgb = (output_np[:, :, :3].astype(np.float32) * alpha).astype(np.uint8)
            else:
                # Fix: the original used cv2.COLOR_RGBA2BGR here, which
                # raises on a 3-channel RGB input.
                rgb = output_np[:, :, :3]

            out.write(cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR))

            frame_count += 1
            if frame_count % 10 == 0:
                print(f"Processed {frame_count}/{total_frames} frames...")

    except Exception as e:
        print(f"Error processing frames: {e}")

    finally:
        cap.release()
        out.release()
        print("Done.")
|
||||
|
||||
if __name__ == "__main__":
    # Ensure dependencies: pip install rembg opencv-python pillow numpy
    # WebM is used for the output because the container supports transparency.
    source = "cup_spin.mp4"
    destination = "cup_spin_no_bg.webm"

    process_video(source, destination)
|
||||
BIN
product-scroll-poc/workshop.jpg
Normal file
BIN
product-scroll-poc/workshop.jpg
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 359 KiB |
BIN
product-scroll-poc/workshop_depth.png
Normal file
BIN
product-scroll-poc/workshop_depth.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 490 KiB |
Reference in New Issue
Block a user