Removes deprecated arguments and parameters from v4 #5968

Merged Oct 18, 2023 · 29 commits
Changes from 25 commits
5 changes: 5 additions & 0 deletions .changeset/spicy-streets-stop.md
@@ -0,0 +1,5 @@
---
"gradio": minor
---

feat:Removes deprecated arguments and parameters from v4
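The core migration applied throughout this PR: in v4 the deprecated `Component.update(...)` classmethods are gone, and event handlers return component instances instead. A minimal sketch of the before/after (component names here are illustrative, not taken from the diff):

```python
import gradio as gr

with gr.Blocks() as demo:
    name = gr.Textbox(label="Name")
    greet = gr.Button("Greet")
    output = gr.Textbox(label="Greeting")
    with gr.Group(visible=False) as group:
        gr.Textbox("Hello!")

    def greeting(n):
        # v3 (deprecated): return f"Hello {n}", gr.Group.update(visible=True)
        # v4: return a component instance to update its properties
        return f"Hello {n}", gr.Group(visible=True)

    greet.click(greeting, inputs=[name], outputs=[output, group])
```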
16 changes: 7 additions & 9 deletions client/python/test/conftest.py
@@ -272,13 +272,13 @@ def hello_world_with_group():
gr.Textbox("Hello!")

def greeting(name):
return f"Hello {name}", gr.Group.update(visible=True)
return f"Hello {name}", gr.Group(visible=True)

greet.click(
greeting, inputs=[name], outputs=[output, group], api_name="greeting"
)
show_group.click(
lambda: gr.Group.update(visible=False), None, group, api_name="show_group"
lambda: gr.Group(visible=False), None, group, api_name="show_group"
)
return demo

@@ -300,7 +300,7 @@ def hello_world_with_state_and_accordion():

def greeting(name, state):
state += 1
return state, f"Hello {name}", state, gr.Accordion.update(open=False)
return state, f"Hello {name}", state, gr.Accordion(open=False)

greet.click(
greeting,
@@ -309,13 +309,13 @@ def greeting(name, state):
api_name="greeting",
)
open_acc.click(
lambda state: (state + 1, state + 1, gr.Accordion.update(open=True)),
lambda state: (state + 1, state + 1, gr.Accordion(open=True)),
[n_counts],
[n_counts, num, accordion],
api_name="open",
)
close_acc.click(
lambda state: (state + 1, state + 1, gr.Accordion.update(open=False)),
lambda state: (state + 1, state + 1, gr.Accordion(open=False)),
[n_counts],
[n_counts, num, accordion],
api_name="close",
@@ -350,9 +350,7 @@ def _stream_audio(audio_file):

@pytest.fixture
def video_component():
return gr.Interface(
fn=lambda x: x, inputs=gr.Video(type="file"), outputs=gr.Video()
)
return gr.Interface(fn=lambda x: x, inputs=gr.Video(), outputs=gr.Video())


@pytest.fixture
@@ -368,7 +366,7 @@ def all_components():
classes_to_check.extend(children)
if (
"value" in inspect.signature(subclass).parameters
and subclass != gr.components.IOComponent
and subclass != gr.components.Component
and not getattr(subclass, "is_template", False)
):
subclasses.append(subclass)
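The fixture above also swaps `gr.components.IOComponent` for `gr.components.Component`, the v4 base class. Spelled out as a standalone loop (a sketch of the same introspection, assuming the class tree is walked with `__subclasses__()` as the fixture does):

```python
import inspect
import gradio as gr

# Collect every component class that accepts a `value` parameter,
# starting from the v4 base class gr.components.Component
# (gr.components.IOComponent no longer exists).
subclasses = []
classes_to_check = gr.components.Component.__subclasses__()
while classes_to_check:
    subclass = classes_to_check.pop()
    classes_to_check.extend(subclass.__subclasses__())
    if (
        "value" in inspect.signature(subclass).parameters
        and subclass != gr.components.Component
        and not getattr(subclass, "is_template", False)
    ):
        subclasses.append(subclass)

print([c.__name__ for c in subclasses])
```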
2 changes: 1 addition & 1 deletion demo/depth_estimation/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: depth_estimation\n", "### A demo for predicting the depth of an image and generating a 3D model of it.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers numpy Pillow jinja2 open3d"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/packages.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import DPTFeatureExtractor, DPTForDepthEstimation\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import open3d as o3d\n", "from pathlib import Path\n", "\n", "feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n", "model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n", "\n", "def process_image(image_path):\n", " image_path = Path(image_path)\n", " image_raw = Image.open(image_path)\n", " image = image_raw.resize(\n", " (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n", " Image.Resampling.LANCZOS)\n", "\n", " # prepare image for the model\n", " encoding = feature_extractor(image, return_tensors=\"pt\")\n", "\n", " # forward pass\n", " with torch.no_grad():\n", " outputs = model(**encoding)\n", " predicted_depth = outputs.predicted_depth\n", "\n", " # interpolate to original size\n", " prediction = torch.nn.functional.interpolate(\n", " predicted_depth.unsqueeze(1),\n", " size=image.size[::-1],\n", " mode=\"bicubic\",\n", " align_corners=False,\n", " ).squeeze()\n", " output = prediction.cpu().numpy()\n", " depth_image = (output * 255 / np.max(output)).astype('uint8')\n", " try:\n", " gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except Exception:\n", " gltf_path = create_3d_obj(\n", " np.array(image), depth_image, image_path, depth=8)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except:\n", " print(\"Error reconstructing 3D model\")\n", " raise Exception(\"Error reconstructing 3D model\")\n", "\n", "\n", "def create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n", " depth_o3d = o3d.geometry.Image(depth_image)\n", " image_o3d = o3d.geometry.Image(rgb_image)\n", " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", " image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n", " w = int(depth_image.shape[1])\n", " h = int(depth_image.shape[0])\n", "\n", " camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n", " camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n", "\n", " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", " rgbd_image, camera_intrinsic)\n", "\n", " print('normals')\n", " 
pcd.normals = o3d.utility.Vector3dVector(\n", " np.zeros((1, 3))) # invalidate existing normals\n", " pcd.estimate_normals(\n", " search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n", " pcd.orient_normals_towards_camera_location(\n", " camera_location=np.array([0., 0., 1000.]))\n", " pcd.transform([[1, 0, 0, 0],\n", " [0, -1, 0, 0],\n", " [0, 0, -1, 0],\n", " [0, 0, 0, 1]])\n", " pcd.transform([[-1, 0, 0, 0],\n", " [0, 1, 0, 0],\n", " [0, 0, 1, 0],\n", " [0, 0, 0, 1]])\n", "\n", " print('run Poisson surface reconstruction')\n", " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n", " mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n", " pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n", "\n", " voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n", " print(f'voxel_size = {voxel_size:e}')\n", " mesh = mesh_raw.simplify_vertex_clustering(\n", " voxel_size=voxel_size,\n", " contraction=o3d.geometry.SimplificationContraction.Average)\n", "\n", " # vertices_to_remove = densities < np.quantile(densities, 0.001)\n", " # mesh.remove_vertices_by_mask(vertices_to_remove)\n", " bbox = pcd.get_axis_aligned_bounding_box()\n", " mesh_crop = mesh.crop(bbox)\n", " gltf_path = f'./{image_path.stem}.gltf'\n", " o3d.io.write_triangle_mesh(\n", " gltf_path, mesh_crop, write_triangle_uvs=True)\n", " return gltf_path\n", "\n", "title = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\n", "description = \"This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\n", "examples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n", "\n", "iface = gr.Interface(fn=process_image,\n", " inputs=[gr.Image(\n", " type=\"filepath\", label=\"Input Image\")],\n", " outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n", " gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n", " 1.0, 1.0, 1.0, 1.0]),\n", " gr.File(label=\"3d gLTF\")],\n", " title=title,\n", " description=description,\n", " examples=examples,\n", " allow_flagging=\"never\",\n", " cache_examples=False)\n", "\n", "iface.launch(debug=True, enable_queue=False)"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: depth_estimation\n", "### A demo for predicting the depth of an image and generating a 3D model of it.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio torch git+https://github.com/nielsrogge/transformers.git@add_dpt_redesign#egg=transformers numpy Pillow jinja2 open3d"]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('examples')\n", "!wget -q -O examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/depth_estimation/packages.txt"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import DPTFeatureExtractor, DPTForDepthEstimation\n", "import torch\n", "import numpy as np\n", "from PIL import Image\n", "import open3d as o3d\n", "from pathlib import Path\n", "\n", "feature_extractor = DPTFeatureExtractor.from_pretrained(\"Intel/dpt-large\")\n", "model = DPTForDepthEstimation.from_pretrained(\"Intel/dpt-large\")\n", "\n", "def process_image(image_path):\n", " image_path = Path(image_path)\n", " image_raw = Image.open(image_path)\n", " image = image_raw.resize(\n", " (800, int(800 * image_raw.size[1] / image_raw.size[0])),\n", " Image.Resampling.LANCZOS)\n", "\n", " # prepare image for the model\n", " encoding = feature_extractor(image, return_tensors=\"pt\")\n", "\n", " # forward pass\n", " with torch.no_grad():\n", " outputs = model(**encoding)\n", " predicted_depth = outputs.predicted_depth\n", "\n", " # interpolate to original size\n", " prediction = torch.nn.functional.interpolate(\n", " predicted_depth.unsqueeze(1),\n", " size=image.size[::-1],\n", " mode=\"bicubic\",\n", " align_corners=False,\n", " ).squeeze()\n", " output = prediction.cpu().numpy()\n", " depth_image = (output * 255 / np.max(output)).astype('uint8')\n", " try:\n", " gltf_path = create_3d_obj(np.array(image), depth_image, image_path)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except Exception:\n", " gltf_path = create_3d_obj(\n", " np.array(image), depth_image, image_path, depth=8)\n", " img = Image.fromarray(depth_image)\n", " return [img, gltf_path, gltf_path]\n", " except:\n", " print(\"Error reconstructing 3D model\")\n", " raise Exception(\"Error reconstructing 3D model\")\n", "\n", "\n", "def create_3d_obj(rgb_image, depth_image, image_path, depth=10):\n", " depth_o3d = o3d.geometry.Image(depth_image)\n", " image_o3d = o3d.geometry.Image(rgb_image)\n", " rgbd_image = o3d.geometry.RGBDImage.create_from_color_and_depth(\n", " image_o3d, depth_o3d, convert_rgb_to_intensity=False)\n", " w = int(depth_image.shape[1])\n", " h = int(depth_image.shape[0])\n", "\n", " camera_intrinsic = o3d.camera.PinholeCameraIntrinsic()\n", " camera_intrinsic.set_intrinsics(w, h, 500, 500, w/2, h/2)\n", "\n", " pcd = o3d.geometry.PointCloud.create_from_rgbd_image(\n", " rgbd_image, camera_intrinsic)\n", "\n", " print('normals')\n", " 
pcd.normals = o3d.utility.Vector3dVector(\n", " np.zeros((1, 3))) # invalidate existing normals\n", " pcd.estimate_normals(\n", " search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))\n", " pcd.orient_normals_towards_camera_location(\n", " camera_location=np.array([0., 0., 1000.]))\n", " pcd.transform([[1, 0, 0, 0],\n", " [0, -1, 0, 0],\n", " [0, 0, -1, 0],\n", " [0, 0, 0, 1]])\n", " pcd.transform([[-1, 0, 0, 0],\n", " [0, 1, 0, 0],\n", " [0, 0, 1, 0],\n", " [0, 0, 0, 1]])\n", "\n", " print('run Poisson surface reconstruction')\n", " with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug):\n", " mesh_raw, densities = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(\n", " pcd, depth=depth, width=0, scale=1.1, linear_fit=True)\n", "\n", " voxel_size = max(mesh_raw.get_max_bound() - mesh_raw.get_min_bound()) / 256\n", " print(f'voxel_size = {voxel_size:e}')\n", " mesh = mesh_raw.simplify_vertex_clustering(\n", " voxel_size=voxel_size,\n", " contraction=o3d.geometry.SimplificationContraction.Average)\n", "\n", " # vertices_to_remove = densities < np.quantile(densities, 0.001)\n", " # mesh.remove_vertices_by_mask(vertices_to_remove)\n", " bbox = pcd.get_axis_aligned_bounding_box()\n", " mesh_crop = mesh.crop(bbox)\n", " gltf_path = f'./{image_path.stem}.gltf'\n", " o3d.io.write_triangle_mesh(\n", " gltf_path, mesh_crop, write_triangle_uvs=True)\n", " return gltf_path\n", "\n", "title = \"Demo: zero-shot depth estimation with DPT + 3D Point Cloud\"\n", "description = \"This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object.\"\n", "examples = [[\"examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg\"]]\n", "\n", "iface = gr.Interface(fn=process_image,\n", " inputs=[gr.Image(\n", " type=\"filepath\", label=\"Input Image\")],\n", " outputs=[gr.Image(label=\"predicted depth\", type=\"pil\"),\n", " gr.Model3D(label=\"3d mesh reconstruction\", clear_color=[\n", " 1.0, 1.0, 1.0, 1.0]),\n", " gr.File(label=\"3d gLTF\")],\n", " title=title,\n", " description=description,\n", " examples=examples,\n", " allow_flagging=\"never\",\n", " cache_examples=False)\n", "\n", "iface.launch(debug=True)"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
2 changes: 1 addition & 1 deletion demo/depth_estimation/run.py
@@ -114,4 +114,4 @@ def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
allow_flagging="never",
cache_examples=False)

iface.launch(debug=True, enable_queue=False)
iface.launch(debug=True)
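`enable_queue` is one of the removed `launch()` arguments: in v4 the queue is enabled by default. If queue behaviour still needs tuning, an explicit `.queue()` call is the replacement (a sketch under that assumption, not part of this diff):

```python
import gradio as gr

demo = gr.Interface(fn=lambda x: x, inputs="text", outputs="text")

# v4: launch() no longer accepts enable_queue; queuing is on by default.
# Configure the queue explicitly if needed, then launch as before.
demo.queue(max_size=20)
demo.launch(debug=True)
```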
2 changes: 1 addition & 1 deletion demo/video_component/run.ipynb
@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/a.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/a.mp4\n", "!wget -q -O files/b.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/b.mp4\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "a = os.path.join(os.path.abspath(''), \"files/world.mp4\") # Video\n", "b = os.path.join(os.path.abspath(''), \"files/a.mp4\") # Video\n", "c = os.path.join(os.path.abspath(''), \"files/b.mp4\") # Video\n", "\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(type=\"file\"),\n", " outputs=gr.Video(),\n", " examples=[\n", " [a],\n", " [b],\n", " [c],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/a.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/a.mp4\n", "!wget -q -O files/b.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/b.mp4\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "\n", "a = os.path.join(os.path.abspath(''), \"files/world.mp4\") # Video\n", "b = os.path.join(os.path.abspath(''), \"files/a.mp4\") # Video\n", "c = os.path.join(os.path.abspath(''), \"files/b.mp4\") # Video\n", "\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(),\n", " outputs=gr.Video(),\n", " examples=[\n", " [a],\n", " [b],\n", " [c],\n", " ],\n", " cache_examples=True\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
2 changes: 1 addition & 1 deletion demo/video_component/run.py
@@ -9,7 +9,7 @@

demo = gr.Interface(
fn=lambda x: x,
inputs=gr.Video(type="file"),
inputs=gr.Video(),
outputs=gr.Video(),
examples=[
[a],
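Same idea for `gr.Video`: the deprecated `type="file"` argument is dropped because the v4 handler receives a filepath by default. A minimal echo sketch mirroring the demo above (function name is illustrative):

```python
import gradio as gr

def echo_video(video_path):
    # v4: gr.Video no longer takes type="file"; the handler gets a
    # filepath string and can return one for the output component.
    return video_path

demo = gr.Interface(fn=echo_video, inputs=gr.Video(), outputs=gr.Video())

if __name__ == "__main__":
    demo.launch()
```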