Monday, November 20, 2023

A shader that stops objects behind it from being rendered (for VR/MR)

The first pass renders the occluder invisibly while writing 1 to the stencil buffer (and to the depth buffer, so geometry drawn behind it afterwards is clipped); the second pass draws _Color only where the stencil is not 1.

 Shader "Custom/OcclusionShader" {

    Properties{

        _Color("Main Color", Color) = (1,1,1,1)

    }

        SubShader{

            Tags { "RenderType" = "Opaque" }

            LOD 100


            Pass {

                Stencil {

                    Ref 1

                    Comp always

                    Pass replace

                }


                CGPROGRAM

                #pragma vertex vert

                #pragma fragment frag

                #include "UnityCG.cginc"


                struct appdata {

                    float4 vertex : POSITION;

                    float3 normal : NORMAL;

                    UNITY_VERTEX_INPUT_INSTANCE_ID 

                };


                struct v2f {

                    float4 pos : SV_POSITION;

                    UNITY_VERTEX_OUTPUT_STEREO 

                };


                fixed4 _Color;


                v2f vert(appdata v) {

                    v2f o;

                    UNITY_SETUP_INSTANCE_ID(v); 

                    UNITY_INITIALIZE_OUTPUT(v2f, o); 

                    UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o); 


                    o.pos = UnityObjectToClipPos(v.vertex);

                    return o;

                }


                fixed4 frag(v2f i) : SV_Target {

                    return fixed4(0,0,0,0); // Render invisible color

                }

                ENDCG

            }


            Pass {

                Stencil {

                    Ref 1

                    Comp notequal

                    Pass keep

                }


                CGPROGRAM

                #pragma vertex vert

                #pragma fragment frag

                #include "UnityCG.cginc"


                struct appdata {

                    float4 vertex : POSITION;

                    float3 normal : NORMAL;

                    UNITY_VERTEX_INPUT_INSTANCE_ID 

                };


                struct v2f {

                    float4 pos : SV_POSITION;

                    UNITY_VERTEX_OUTPUT_STEREO 

                };


                fixed4 _Color;


                v2f vert(appdata v) {

                    v2f o;

                    UNITY_SETUP_INSTANCE_ID(v); 

                    UNITY_INITIALIZE_OUTPUT(v2f, o); 

                    UNITY_INITIALIZE_VERTEX_OUTPUT_STEREO(o); 

                    o.pos = UnityObjectToClipPos(v.vertex);

                    return o;

                }


                fixed4 frag(v2f i) : SV_Target {

                    // Render with object color

                    return _Color;

                }

                ENDCG

            }

    }

        FallBack "Diffuse"

}
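A minimal usage sketch (my addition, not from the original post): a component that assigns this shader to an occluder mesh and pulls its render queue just below the default opaque queue, so the occluder's depth write is already in place when ordinary geometry is drawn. The component name and the queue value 1999 are illustrative assumptions.

using UnityEngine;

// Hypothetical helper: attach to the mesh that should hide objects behind it.
public class OcclusionMaterialSetup : MonoBehaviour
{
    void Start()
    {
        var rend = GetComponent<Renderer>();
        var mat = new Material(Shader.Find("Custom/OcclusionShader"));
        // Draw just before the default opaque queue (2000) so the occluder's
        // depth values clip objects rendered after it.
        mat.renderQueue = 1999;
        rend.material = mat;
    }
}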


Friday, November 17, 2023

Having a conversation with GPT-4V using camera images and text

The script below renders the Unity camera into a RenderTexture, encodes the frame as a Base64 PNG, and sends it to the Chat Completions API together with a text prompt; the reply is shown on a TextMeshPro label.

using UnityEngine;
using System.Collections;
using UnityEngine.Networking;
using System;
using TMPro;
using Newtonsoft.Json;
using System.Collections.Generic;

public class ImageRequester : MonoBehaviour
{
    private string apiURL = "https://api.openai.com/v1/chat/completions";
    private string apiKey = "APIKey"; // Replace with your OpenAI API key
    public Camera mainCamera;
    public RenderTexture renderTexture;
    public TMP_Text tmpText;
    public bool TextVisible = false;

    private List<Message> conversationHistory = new List<Message>();

    [System.Serializable]
    public class ChatRequest
    {
        public string model = "gpt-4-vision-preview";
        public Message[] messages;
        public int max_tokens = 300;
    }

    [System.Serializable]
    public class Message
    {
        public string role;
        public Content[] content;
    }

    [Serializable]
    public class Content
    {
        public string type;
        public string text;
        public ImageURL image_url;
    }

    [Serializable]
    public class ImageURL
    {
        public string url;
    }

    [Serializable]
    public class ResponseData
    {
        public Choice[] choices;

        [Serializable]
        public class Choice
        {
            public Message message;
        }

        [Serializable]
        public class Message
        {
            public string content;
        }
    }

    private string ParseResponse(string response)
    {
        try
        {
            ResponseData data = JsonConvert.DeserializeObject<ResponseData>(response);
            if (data != null && data.choices != null && data.choices.Length > 0 && data.choices[0].message != null)
            {
                return data.choices[0].message.content;
            }
        }
        catch (Exception e)
        {
            Debug.LogError("Error parsing response: " + e.Message);
        }
        return string.Empty;
    }

    void Start()
    {
        conversationHistory.Add(new Message
        {
            role = "system",
            content = new[]
            {
                new Content { type = "text", text = "あなたは親切なアシスタントです。" }
            }
        });
    }

    public void RequestButtonClicked(string prompt)
    {
        if (tmpText.IsActive())
        {
            tmpText.gameObject.SetActive(false);
        }
        else
        {
            tmpText.gameObject.SetActive(true);
            tmpText.text = "";
            ImageRequestToGPT(prompt);
        }
    }

    public void ImageRequestToGPT(string prompt)
    {
        Debug.Log("Button Clicked. Requesting image description...");

        // Render the camera into the RenderTexture and read the pixels back.
        mainCamera.targetTexture = renderTexture;
        mainCamera.Render();

        RenderTexture.active = renderTexture;
        Texture2D image = new Texture2D(renderTexture.width, renderTexture.height);
        image.ReadPixels(new Rect(0, 0, renderTexture.width, renderTexture.height), 0, 0);
        image.Apply();

        tmpText.text = "Start requesting image description...";

        byte[] imageBytes = image.EncodeToPNG();
        string base64Image = Convert.ToBase64String(imageBytes);

        mainCamera.targetTexture = null;
        RenderTexture.active = null;

        StartCoroutine(CallOpenAIVisionAPI(prompt, base64Image));
        Destroy(image);
    }

    IEnumerator CallOpenAIVisionAPI(string prompt, string base64Image)
    {
        var request = new UnityWebRequest(apiURL, "POST");
        Message newMessage = new Message
        {
            role = "user",
            content = new[]
            {
                new Content { type = "text", text = prompt },
                new Content { type = "image_url", image_url = new ImageURL { url = $"data:image/png;base64,{base64Image}" } }
            }
        };

        conversationHistory.Add(newMessage);

        Message[] messagesToSend = conversationHistory.ToArray();
        ChatRequest chatRequest = new ChatRequest { messages = messagesToSend };

        // Omit null fields (e.g. image_url on text-only content) from the payload.
        var settings = new JsonSerializerSettings
        {
            NullValueHandling = NullValueHandling.Ignore
        };

        string jsonPayload = JsonConvert.SerializeObject(chatRequest, settings);
        Debug.Log("jsonPayload: " + jsonPayload);

        request.uploadHandler = new UploadHandlerRaw(System.Text.Encoding.UTF8.GetBytes(jsonPayload));
        request.downloadHandler = new DownloadHandlerBuffer();
        request.SetRequestHeader("Content-Type", "application/json");
        request.SetRequestHeader("Authorization", $"Bearer {apiKey}");

        yield return request.SendWebRequest();

        if (request.result != UnityWebRequest.Result.Success)
        {
            Debug.LogError($"Error: {request.error}");
        }
        else
        {
            string description = ParseResponse(request.downloadHandler.text);
            Debug.Log("Description: " + description);
            tmpText.text = description;

            // Keep the assistant's reply in the history so the next request
            // continues the conversation instead of starting over.
            conversationHistory.Add(new Message
            {
                role = "assistant",
                content = new[] { new Content { type = "text", text = description } }
            });
        }

        // Release the native resources held by the request.
        request.Dispose();
    }
}
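A hedged wiring example (my addition): calling RequestButtonClicked from a uGUI Button. The AskButton component and its fields are assumptions for illustration; ImageRequester itself needs mainCamera, renderTexture, and tmpText assigned in the Inspector.

using UnityEngine;
using UnityEngine.UI;

// Hypothetical example of driving ImageRequester from a UI button.
public class AskButton : MonoBehaviour
{
    public ImageRequester requester; // assign in the Inspector
    public Button button;            // assign in the Inspector

    void Start()
    {
        // Each click toggles the response text; when it is turned on,
        // the current camera frame is sent along with this prompt.
        button.onClick.AddListener(
            () => requester.RequestButtonClicked("この画像には何が写っていますか?"));
    }
}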


Thursday, October 19, 2023

The Meshes of ModularAnimalRobotBiped

Tails

Power parts

Pelvises

Necks

Legs

Heads

Bodies

Arms

Animation patterns of ModularAnimalRobotBiped
How To Use ModularAnimalRobotBiped.

https://www.youtube.com/watch?v=nUGIblZhw9Q

Modular Animal Robot Biped is an asset that allows you to create characters with various patterns by combining parts consisting of legs, pelvis, torso, neck, head, tail, and arms.


You can change the mesh, material, and size of each part by opening BP_AnimalRobotBipedAnimatedCharacter and changing the variable AnimalRobotBipedPattern.

- Three material patterns are available for each part.

Materials can also be changed in bulk: turning on IsChangeMaterialPattern picks a pattern at random, while turning on IsFixMaterialPattern unifies every part to the pattern specified in FixedMaterialPattern.

When RandomColor is turned on, each material is given a random color centered on CenterColor with a spread of ColorRandomnessIntensity.

Turning on IsRandomMesh picks a random mesh pattern, and turning on IsRandomSize scales each part randomly within the range 0.8-1.2 (these selection rules are modeled in the sketch below).
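As a rough model of the selection rules just described (a hypothetical C# sketch of my own; the asset implements this in Blueprints, so none of these names are its real API):

using System;

// Hypothetical model of the pattern-randomization rules described above.
public class PatternRandomizer
{
    static readonly Random Rng = new Random();

    public bool IsChangeMaterialPattern; // pick material patterns at random
    public bool IsFixMaterialPattern;    // force one pattern onto every part
    public int FixedMaterialPattern;     // 0..2: three patterns per part

    public int PickMaterialPattern(int current)
    {
        if (IsFixMaterialPattern) return FixedMaterialPattern; // unified
        if (IsChangeMaterialPattern) return Rng.Next(3);       // random
        return current;                                        // unchanged
    }

    // RandomColor: a value spread by ColorRandomnessIntensity
    // around CenterColor, per color channel.
    public double PickColorChannel(double center, double intensity)
        => center + (Rng.NextDouble() * 2.0 - 1.0) * intensity;

    // IsRandomSize: each part is scaled randomly within 0.8-1.2.
    public double PickSize() => 0.8 + Rng.NextDouble() * 0.4;
}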

When randomness is on, the pattern is re-rolled each time you change a variable. The randomness is applied when the character is spawned into the scene, so the result will not look the same in the viewport. If you find a pattern you want to keep, capture its variable values as follows:

1. Copy the AnimalRobotBipedPattern of the character in the scene.
2. Open BP_AnimalRobotBipedAnimatedCharacter and turn off randomness.
3. Paste the AnimalRobotBipedPattern.

By using EUW_BipedCharacterAnimatedSpawn, you can place Xnumber x Ynumber BP_AnimalRobotBipedAnimatedCharacters in random patterns in the scene.

Control rigs are set up on the legs and arms.
The pelvic suspension has a spring that expands and contracts with the character's movement.

In the demo scene, you can check the behavior of the third-person character.

BP_AnimalRobotBipedProceduralCharacter is similar to BP_AnimalRobotBipedAnimatedCharacter, but the walking animation is procedural. 

Batch-resizing images

How to resize the PNG image files in a folder and its subfolders to 1024x1024 on Windows with ImageMagick
https://imagemagick.org/index.php
Move to the folder in a command prompt and run:

forfiles /S /M *.png /C "cmd /c magick mogrify -resize 1024x1024 @path"

Note: -resize 1024x1024 fits each image within 1024x1024 while preserving its aspect ratio; use 1024x1024! to force the exact size.

Wednesday, September 27, 2023

A Blender Python script that exports each selected mesh, together with its armature, as a separate FBX

import bpy

# Get the selected mesh objects
selected_meshes = [obj for obj in bpy.context.selected_objects if obj.type == 'MESH']

for mesh in selected_meshes:
    # Deselect all objects
    bpy.ops.object.select_all(action='DESELECT')

    # Select the mesh
    mesh.select_set(True)

    # Find the armature associated with the mesh (via its parent)
    armature = None
    if mesh.parent and mesh.parent.type == 'ARMATURE':
        armature = mesh.parent
        armature.select_set(True)

    # Set the active object (required for export)
    bpy.context.view_layer.objects.active = mesh

    # Export as FBX ("path_to_save" is a placeholder output directory)
    bpy.ops.export_scene.fbx(
        filepath=f"path_to_save/{mesh.name}.fbx",
        use_selection=True,
        mesh_smooth_type='FACE',
        bake_anim=False,
        add_leaf_bones=False,
        primary_bone_axis='X',
        secondary_bone_axis='Y',
        global_scale=1.0
    )
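To use the script, select the target meshes in the 3D Viewport, paste it into the Scripting workspace, replace path_to_save with an existing output directory, and press Run Script. Note that the armature lookup only follows the parent relationship; a mesh bound to an armature solely through an Armature modifier would need that modifier's object checked as well.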