diff --git a/CMake配置练习.txt b/CMake配置练习.txt new file mode 100644 index 0000000..3bad442 --- /dev/null +++ b/CMake配置练习.txt @@ -0,0 +1,60 @@ +cmake_minimum_required(VERSION 3.5) + +project(untitled6 VERSION 0.1 LANGUAGES CXX) + +set(CMAKE_INCLUDE_CURRENT_DIR ON) + +set(CMAKE_AUTOUIC ON) +set(CMAKE_AUTOMOC ON) +set(CMAKE_AUTORCC ON) + +set(CMAKE_CXX_STANDARD 17) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +#list(APPEND CMAKE_PREFIX_PATH D:\\Download\\opencv4_6_0\\build\\x64\\vc15\\lib) +set(OpenCV_DIR /home/jcen/Downloads/openCV4.5.5_WithContrib_LinuxBuild/openCV4.5.5/lib/cmake/opencv4) +find_package(OpenCV REQUIRED) + +find_package(QT NAMES Qt6 Qt5 REQUIRED COMPONENTS Widgets) +find_package(Qt${QT_VERSION_MAJOR} REQUIRED COMPONENTS Widgets) + +set(PROJECT_SOURCES + main.cpp + widget.cpp + widget.h +) + +if(${QT_VERSION_MAJOR} GREATER_EQUAL 6) + qt_add_executable(untitled6 + MANUAL_FINALIZATION + ${PROJECT_SOURCES} + ) +else() + if(ANDROID) + add_library(untitled6 SHARED + ${PROJECT_SOURCES} + ) + else() + add_executable(untitled6 + ${PROJECT_SOURCES} + ) + endif() +endif() + +target_link_libraries(untitled6 PRIVATE Qt${QT_VERSION_MAJOR}::Widgets ${OpenCV_LIBS}) + +set_target_properties(untitled6 PROPERTIES + MACOSX_BUNDLE_GUI_IDENTIFIER my.example.com + MACOSX_BUNDLE_BUNDLE_VERSION ${PROJECT_VERSION} + MACOSX_BUNDLE_SHORT_VERSION_STRING ${PROJECT_VERSION_MAJOR}.${PROJECT_VERSION_MINOR} + MACOSX_BUNDLE TRUE + WIN32_EXECUTABLE TRUE +) + +install(TARGETS untitled6 + BUNDLE DESTINATION . + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}) + +if(QT_VERSION_MAJOR EQUAL 6) + qt_finalize_executable(untitled6) +endif() diff --git a/chat聊天记录.txt b/chat聊天记录.txt new file mode 100644 index 0000000..7b12e56 --- /dev/null +++ b/chat聊天记录.txt @@ -0,0 +1 @@ +{"chat_list":[{"name":"平面图隧道效果制作指南","list":[{"text":"图像处理,在平面图上做一个隧道的效果","role":"me"},{"text":"要在平面图上实现隧道的效果,可以使用图像处理软件(如Photoshop)来完成。下面是一种可能的方法:\n\n1. 打开图像处理软件,并导入你想要添加隧道效果的平面图。\n\n2. 创建一个新的图层,命名为“隧道”。\n\n3. 使用椭圆选择工具,在图像上选择一个适当的位置和大小,以模拟隧道的入口。确保选择的形状与隧道的形状相匹配。\n\n4. 在“隧道”图层上,使用渐变工具,选择一个合适的渐变(例如从黑色到透明),并在选定的椭圆区域上应用渐变。这将创建一个从黑色到透明的渐变效果,模拟隧道的深度。\n\n5. 调整渐变的透明度和不透明度,以使隧道看起来更加...","role":"robot"},{"text":"c++图像处理,在平面图上做一个隧道的效果","role":"me"},{"text":"要在C++中实现在平面图上添加隧道效果,你可以使用图像处理库(如OpenCV)来完成。下面是一种可能的方法:\n\n1. 导入OpenCV库,并加载你想要添加隧道效果的平面图像。\n\n```cpp\n#include \n\nint main() {\n cv::Mat image = cv::imread(\"input.jpg\");\n if (image.empty()) {\n std::cout << \"Failed to load image!\" << std::endl;\n return -1;\n }\n\n // 在这里添加隧道效果的代码\n\n cv::imshow(\"Output\", image);\n cv::waitKey(0);\n\n return 0;\n}\n```\n\n2. 创建一个新的图像,用于存储添加隧道效果后的结果。\n\n```cpp\ncv::Mat result = cv::Mat::zeros(image.size(), image.type());\n```\n\n3. 定义隧道的参数,例如入口位置、宽度和颜色。\n\n```cpp\ncv::Point entrance(200, 200); // 入口位置\nint width = 100; // 隧道宽度\ncv::Scalar color(0, 0, 0); // 隧道颜色(黑色)\n```\n\n4. 使用循环遍历图像的每个像素,并根据距离入口的距离来设置像素的颜色。\n\n```cpp\nfor (int y = 0; y < image.rows; y++) {\n for (int x = 0; x < image.cols; x++) {\n // 计算当前像素到入口的距离\n double distance = cv::norm(cv::Point(x, y) - entrance);\n\n // 根据距离设置像素的颜色\n if (distance < width) {\n result.at(y, x) = image.at(y, x) * (1 - distance / width) + color * (distance / width);\n } else {\n result.at(y, x) = image.at(y, x);\n }\n }\n}\n```\n\n5. 
显示并保存添加隧道效果后的图像。\n\n```cpp\ncv::imshow(\"Output\", result);\ncv::imwrite(\"output.jpg\", result);\ncv::waitKey(0);\n```\n\n这样,你就可以在C++中使用OpenCV库实现在平面图上添加隧道效果了。请注意,以上代码仅为示例,你可以根据实际需求进行调整和优化。","role":"robot"},{"text":"c++图像处理,在平面图上做一个隧道的效果,不借助任何库,遍历所有像素做算法,一个红色的隧道","role":"me"},{"text":"要在C++中实现在平面图上添加红色隧道效果,不借助任何库,可以使用以下算法:\n\n1. 加载图像并获取图像的宽度和高度。\n\n```cpp\n#include \n#include \n\nstruct Pixel {\n unsigned char r, g, b;\n};\n\nint main() {\n std::ifstream file(\"input.raw\", std::ios::binary);\n if (!file) {\n std::cout << \"Failed to open image file!\" << std::endl;\n return -1;\n }\n\n int width = 640; // 图像宽度\n int height = 480; // 图像高度\n\n // 读取图像数据\n Pixel* image = new Pixel[width * height];\n file.read(reinterpret_cast(image), width * height * sizeof(Pixel));\n file.close();\n\n // 在这里添加隧道效果的代码\n\n // 保存图像数据\n std::ofstream outFile(\"output.raw\", std::ios::binary);\n outFile.write(reinterpret_cast(image), width * height * sizeof(Pixel));\n outFile.close();\n\n delete[] image;\n\n return 0;\n}\n```\n\n2. 定义隧道的参数,例如入口位置、宽度和颜色。\n\n```cpp\nint entranceX = 320; // 入口X坐标\nint entranceY = 240; // 入口Y坐标\nint width = 100; // 隧道宽度\nunsigned char red = 255; // 隧道颜色(红色)\n```\n\n3. 使用双重循环遍历图像的每个像素,并根据距离入口的距离来设置像素的颜色。\n\n```cpp\nfor (int y = 0; y < height; y++) {\n for (int x = 0; x < width; x++) {\n // 计算当前像素到入口的距离\n int distanceX = x - entranceX;\n int distanceY = y - entranceY;\n double distance = std::sqrt(distanceX * distanceX + distanceY * distanceY);\n\n // 根据距离设置像素的颜色\n if (distance < width) {\n int index = y * width + x;\n image[index].r = red;\n image[index].g = 0;\n image[index].b = 0;\n }\n }\n}\n```\n\n4. 保存添加隧道效果后的图像数据。\n\n```cpp\nstd::ofstream outFile(\"output.raw\", std::ios::binary);\noutFile.write(reinterpret_cast(image), width * height * sizeof(Pixel));\noutFile.close();\n```\n\n这样,你就可以在C++中实现在平面图上添加红色隧道效果了。请注意,以上代码仅为示例,你可以根据实际需求进行调整和优化。同时,你需要将输入图像保存为二进制文件(如.raw)以便读取和处理。","role":"robot"},{"text":"kernel BoxBlur1D : public ImageComputationKernel\n{\n Image src;\n Image dst;\n\nparam:\n int radius; //The radius of our box blur\n\nlocal:\n int _filterWidth;\n\n void define() {\n //RIP node will identify radius as the apron\n defineParam(radius, \"Radius\", 5); \n }\n\n void init() {\n //Set the range we need to access from the source \n src.setRange(-radius, radius);\n\n //Set the axis for the 1D-range to be horizontal\n src.setAxis(eX);\n\n _filterWidth = 2 * radius + 1;\n }\n\n void process() {\n float sum = 0.0f;\n for(int i = -radius; i <= radius; i++)\n sum += src(i);\n dst() = sum / (float)_filterWidth;\n }\n};\n这个所用到的算法","role":"me"},{"text":"这个算法是一个一维盒式模糊(Box Blur)的图像处理算法。它使用一个窗口(卷积核)在图像上滑动,计算窗口内像素的平均值,并将结果写入目标图像。\n\n算法步骤如下:\n\n1. 定义一个名为BoxBlur1D的图像处理内核,继承自ImageComputationKernel类。\n\n2. 声明输入图像src和输出图像dst。\n\n3. 声明一个参数radius,表示盒式模糊的半径。\n\n4. 声明一个局部变量_filterWidth,表示卷积核的宽度。\n\n5. 在define()函数中定义参数radius,并设置默认值为5。\n\n6. 在init()函数中设置源图像src的访问范围为[-radius, radius],并指定访问范围的轴为水平方向(eX)。\n\n7. 在init()函数中计算卷积核的宽度,即_filterWidth = 2 * radius + 1。\n\n8. 在process()函数中,使用一个循环遍历卷积核内的像素,计算它们的和。\n\n9. 将计算得到的和除以卷积核的宽度,得到平均值,并将结果写入目标图像dst。\n\n这个算法实现了一维盒式模糊,通过计算窗口内像素的平均值来实现图像的模糊效果。你可以根据需要调整半径参数来控制模糊的程度。","role":"robot"},{"text":"c++一个类的大小是固定的吗","role":"me"},{"text":"在C++中,一个类的大小不一定是固定的。类的大小取决于其成员变量和继承关系。\n\n1. 成员变量:类的大小会受到其成员变量的影响。每个成员变量都会占用一定的内存空间,其大小取决于变量的类型和对齐方式。如果类中有多个成员变量,它们会按照一定的对齐规则排列在内存中。\n\n2. 
继承关系:如果一个类继承自其他类,那么它的大小还会受到继承关系的影响。派生类会包含基类的成员变量,并且可能会有额外的虚函数表指针(vptr)等信息。这些都会增加派生类的大小。\n\n此外,编译器还可能对类的大小进行优化,例如通过对齐和填充来减小内存的浪费。因此,即使两个类具有相同的成员变量,它们的大小也可能不同。\n\n可以使用sizeof运算符来获取一个类的大小,例如sizeof(MyClass)。这将返回类的大小(以字节为单位),但请注意,sizeof的结果可能包含填充字节,以满足对齐要求。","role":"robot"},{"text":"如果我一个类有十个int变量,这个类会是多大","role":"me"},{"text":"在C++中,int类型通常占用4个字节(32位系统)或8个字节(64位系统)。如果一个类有十个int变量,那么这个类的大小将是10个int变量的大小之和。\n\n假设我们使用的是32位系统,那么一个int变量占用4个字节。因此,十个int变量的总大小为10 * 4 = 40个字节。\n\n如果我们使用的是64位系统,那么一个int变量占用8个字节。因此,十个int变量的总大小为10 * 8 = 80个字节。\n\n需要注意的是,编译器可能会对类的大小进行优化,例如进行对齐和填充,以满足对齐要求。这可能会导致类的实际大小大于简单地将每个成员变量的大小相加。可以使用sizeof运算符来获取一个类的大小,例如sizeof(MyClass),这将返回类的大小(以字节为单位)。","role":"robot"},{"text":"举个例子","role":"me"},{"text":"假设我们有一个类叫做MyClass,其中包含十个int变量:\n\n```cpp\nclass MyClass {\n int var1;\n int var2;\n int var3;\n int var4;\n int var5;\n int var6;\n int var7;\n int var8;\n int var9;\n int var10;\n};\n```\n\n在32位系统上,每个int变量占用4个字节,因此这个类的大小为10 * 4 = 40个字节。\n\n在64位系统上,每个int变量占用8个字节,因此这个类的大小为10 * 8 = 80个字节。\n\n需要注意的是,编译器可能会对类的大小进行优化,例如进行对齐和填充,以满足对齐要求。因此,实际的类大小可能会大于简单地将每个成员变量的大小相加。可以使用sizeof运算符来获取一个类的大小,例如sizeof(MyClass),这将返回类的大小(以字节为单位)。","role":"robot"},{"text":"django的meta索引的应用","role":"me"},{"text":"在Django中,Meta类用于定义模型的元数据,其中包括索引的定义。索引是一种优化技术,用于提高数据库查询的性能。\n\n在模型类中使用Meta类来定义索引,可以通过在Meta类中添加index_together或者indexes属性来实现。\n\n1. index_together属性:可以用于定义多个字段的联合索引。例如,假设我们有一个模型类叫做Book,我们想要在title和author字段上创建一个联合索引,可以这样定义:\n\n```python\nclass Book(models.Model):\n title = models.CharField(max_length=100)\n author = models.CharField(max_length=100)\n\n class Meta:\n index_together = [\n ['title', 'author'],\n ]\n```\n\n2. indexes属性:可以用于定义单个字段或者多个字段的索引。例如,假设我们有一个模型类叫做Book,我们想要在title字段上创建一个单独的索引,可以这样定义:\n\n```python\nclass Book(models.Model):\n title = models.CharField(max_length=100)\n author = models.CharField(max_length=100)\n\n class Meta:\n indexes = [\n models.Index(fields=['title']),\n ]\n```\n\n通过定义索引,数据库在执行查询时可以更快地定位到相关的数据,从而提高查询的性能。请注意,索引的创建可能会对数据库的写入性能产生一定的影响,因此需要根据具体情况进行权衡和测试。","role":"robot"},{"text":"User.objects.all().order_by('-date_joined')这个的意思","role":"me"},{"text":"这段代码是使用Django的ORM(对象关系映射)来查询数据库中的User模型,并按照date_joined字段进行降序排序。\n\n具体解释如下:\n- User.objects.all():表示查询User模型中的所有记录。\n- .order_by('-date_joined'):表示按照date_joined字段进行降序排序。在这里,'-'表示降序排序,如果想要升序排序可以去掉'-'。\n\n综合起来,这段代码的作用是查询数据库中的所有User记录,并按照date_joined字段的值进行降序排序,即最新加入的用户排在前面。","role":"robot"},{"text":"ERROR: Could not find a version that satisfies the requirement channels (from versions: none)\nERROR: No matching distribution found for channels","role":"me"},{"text":"这个错误通常是因为你尝试安装的包在PyPI(Python Package Index)中没有找到。在这种情况下,可能是因为你尝试安装的是Django Channels,而Django Channels并不是通过pip安装的方式来获取的。\n\nDjango Channels是一个独立的包,它提供了在Django中实现WebSocket和异步通信的功能。你可以通过以下方式来安装Django Channels:\n\n```bash\npip install channels\n```\n\n另外,Django Channels的安装可能还需要其他依赖项,你可以查看官方文档以获取更多信息:https://channels.readthedocs.io/en/stable/\n\n如果你仍然遇到问题,可以尝试查看一下你的Python环境和pip版本是否正确,并且确认你的网络连接正常。","role":"robot"},{"text":"Could not fetch URL https://pypi.org/simple/channels/: There was a problem confirming the ssl certificate: HTTPSConnectionPool(host='pypi.org', port=443): Max retries exceeded with url: /simple/channels/ (Caused by SSLError(SSLEOFError(8,\n 'EOF occurred in violation of protocol (_ssl.c:1129)'))) - skipping\nERROR: Could not find a version that satisfies the requirement channels (from versions: none)\nERROR: No matching distribution found for channels","role":"me"},{"text":"这个错误通常是由于与PyPI(Python Package Index)的连接问题或SSL证书验证问题导致的。有几种方法可以尝试解决这个问题:\n\n1. 
**尝试使用代理**:有时候网络环境的限制可能导致无法连接到PyPI。你可以尝试使用代理来解决这个问题。例如,你可以设置pip的代理:\n\n```bash\npip install --proxy=http://your_proxy:your_port channels\n```\n\n2. **更新pip**:确保你的pip版本是最新的,可以通过以下命令来更新pip:\n\n```bash\npip install --upgrade pip\n```\n\n3. **忽略SSL证书验证**:在一些情况下,可以通过忽略SSL证书验证来解决问题。你可以尝试添加`--trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org`参数来忽略SSL证书验证:\n\n```bash\npip install --trusted-host pypi.org --trusted-host pypi.python.org --trusted-host=files.pythonhosted.org channels\n```\n\n请注意,忽略SSL证书验证可能会存在一定的安全风险,建议在安全的网络环境下使用。\n\n如果以上方法仍然无法解决问题,建议检查你的网络连接、防火墙设置以及SSL证书配置,或者尝试在其他网络环境下进行安装。","role":"robot"},{"text":"simple_history这个是python的库吗","role":"me"},{"text":"是的,simple_history是一个Python库,用于在Django应用程序中实现简单的历史记录功能。通过simple_history库,你可以轻松地跟踪Django模型的更改历史,包括记录对象的创建、修改和删除操作,以及查看历史记录的详细信息。\n\nsimple_history库的主要功能包括:\n- 记录模型对象的历史更改,包括字段值的变化。\n- 查看特定对象的历史记录。\n- 恢复对象到先前的状态。\n- 自定义历史记录的存储方式和显示格式。\n\n你可以通过以下方式安装simple_history库:\n```bash\npip install django-simple-history\n```\n\n安装完成后,你可以在Django项目的settings.py文件中将simple_history添加到INSTALLED_APPS中,并在需要跟踪历史记录的模型中使用simple_history.models.SimpleHistory来替代Django的models.Model。\n\nsimple_history库的文档和更多信息可以在其官方网站上找到:https://django-simple-history.readthedocs.io/en/latest/","role":"robot"},{"text":"zbrush 有没有linux版","role":"me"},{"text":"目前,Pixologic公司官方并没有提供ZBrush的Linux版本。ZBrush是一款专业的数字雕刻和绘画软件,主要支持Windows和macOS操作系统。虽然Linux用户目前无法直接在Linux系统上运行ZBrush,但你可以考虑以下几种替代方案:\n\n1. **使用虚拟机或Wine**:你可以尝试在Linux系统上安装虚拟机软件(如VirtualBox)或Wine(Windows模拟器),然后在虚拟机或Wine中安装Windows版本的ZBrush。这种方法可能会有一些性能损失,但可以让你在Linux系统上运行ZBrush。\n\n2. **考虑其他数字雕刻软件**:如果你在Linux系统上工作,并且需要一款数字雕刻软件,可以考虑使用其他支持Linux的数字雕刻软件,如Blender、SculptGL等。这些软件在Linux系统上运行良好,并且提供了类似ZBrush的功能。\n\n3. **等待官方支持**:虽然目前Pixologic公司没有提供ZBrush的Linux版本,但未来可能会考虑开发适用于Linux系统的版本。你可以关注Pixologic公司的官方消息,以获取最新的产品更新和发布信息。\n\n总的来说,虽然目前ZBrush没有官方的Linux版本,但你可以尝试使用虚拟机、Wine或考虑其他数字雕刻软件来满足在Linux系统上进行数字雕刻和绘画的需求。","role":"robot"},{"text":"入门transfromes的知识基础","role":"me"},{"text":"Transformer是一种用于处理序列数据的深度学习模型,最初由Google提出并广泛应用于自然语言处理任务,如机器翻译、文本生成等。Transformer模型的核心是自注意力机制(Self-Attention),它能够在输入序列中建立全局依赖关系,从而更好地捕捉序列中的长距离依赖关系。\n\n要入门Transformer模型的知识基础,你可以按照以下步骤进行学习:\n\n1. **了解自注意力机制**:自注意力机制是Transformer模型的核心组件,它允许模型在输入序列中同时关注不同位置的信息。你可以学习自注意力机制的原理和计算方式,以及它如何帮助模型捕捉序列中的重要信息。\n\n2. **学习Transformer的结构**:Transformer模型由编码器(Encoder)和解码器(Decoder)组成,每个编码器和解码器都包含多个层,每个层由多头自注意力和前馈神经网络组成。你可以学习Transformer模型的整体结构和每个组件的作用。\n\n3. **掌握Transformer的训练和推理过程**:了解Transformer模型的训练过程,包括输入数据的处理、模型的损失函数、优化器的选择等。同时,了解Transformer模型在推理阶段的工作方式,如如何生成输出序列。\n\n4. **实践Transformer模型**:通过使用深度学习框架(如PyTorch、TensorFlow等)实现Transformer模型,并在自然语言处理任务(如机器翻译、文本生成等)上进行实际应用。这样可以帮助你更深入地理解Transformer模型的工作原理和应用场景。\n\n5. **阅读相关论文和教程**:阅读Transformer模型的原始论文《Attention is All You Need》,以及相关的教程和博客文章,可以帮助你更全面地了解Transformer模型的发展历程和应用领域。\n\n通过以上步骤,你可以建立起对Transformer模型的基础知识和理解,为进一步深入学习和应用Transformer模型打下基础。祝你学习顺利!","role":"robot"},{"text":"学习llm","role":"me"},{"text":"LLM(Large Language Model)是指大型语言模型,是一种基于深度学习的模型,用于处理自然语言处理任务。LLM通常是指参数数量庞大、训练数据丰富的语言模型,如GPT(Generative Pre-trained Transformer)系列模型、BERT(Bidirectional Encoder Representations from Transformers)等。\n\n要学习LLM,你可以按照以下步骤进行:\n\n1. **了解语言模型的基础知识**:首先,你需要了解什么是语言模型以及它在自然语言处理中的作用。语言模型是用来预测文本序列的概率分布,可以用于文本生成、语言理解等任务。\n\n2. **学习Transformer模型**:LLM通常基于Transformer模型构建,因此你需要学习Transformer模型的原理、结构和工作方式。了解Transformer模型的自注意力机制、编码器-解码器结构等内容。\n\n3. **熟悉预训练技术**:LLM通常通过预训练技术进行训练,然后在特定任务上进行微调。学习预训练技术的原理和常见方法,如自监督学习、掩码语言建模等。\n\n4. **掌握常见的LLM模型**:学习常见的LLM模型,如GPT系列、BERT等,了解它们的结构、训练方式和应用场景。可以通过阅读相关论文和教程来深入了解这些模型。\n\n5. 
**实践应用**:通过使用深度学习框架(如PyTorch、TensorFlow等)实现LLM模型,并在自然语言处理任务上进行实际应用,如文本生成、情感分析等。实践是学习的最好方式,可以帮助你更深入地理解LLM模型的工作原理和应用场景。\n\n6. **跟踪最新进展**:LLM领域发展迅速,不断涌现出新的模型和技术。因此,建议你关注最新的研究成果和进展,保持学习的持续性。\n\n通过以上步骤,你可以建立起对LLM的基础知识和理解,为进一步深入学习和应用LLM模型打下基础。祝你学习顺利!如果有任何问题,欢迎随时向我提问。","role":"robot"},{"text":"BlinkScript定义一个float3,并使用defineParam初始化","role":"me"},{"text":"在BlinkScript中,你可以定义一个float3类型的变量,并使用defineParam来初始化。BlinkScript是Nuke软件中用于编写GPU加速的图像处理算法的脚本语言。\n\n下面是一个示例代码,演示如何在BlinkScript中定义一个float3类型的变量,并使用defineParam初始化:\n\n```c\nkernel MyKernel : ImageComputationKernel\n{\n Image input;\n Image output;\n\n param:\n float3 myColor;\n\n void define() {\n defineParam(myColor, \"MyColor\", float3(1.0, 0.0, 0.0));\n }\n\n void process(int2 pos) {\n float3 pixel = input(pos).rgb;\n float3 result = pixel * myColor;\n output(pos) = result.rgb;\n }\n}\n```\n\n在上面的示例中,我们定义了一个名为myColor的float3类型的变量,并使用defineParam来初始化它。defineParam函数接受三个参数:变量名、参数名称和默认值。在这里,我们将myColor初始化为红色(1.0, 0.0, 0.0)。\n\n在process函数中,我们读取输入图像的像素值,并将其与myColor相乘,然后将结果写入输出图像。\n\n请注意,以上示例是一个简单的演示代码,实际应用中可能需要根据具体需求进行修改和扩展。希望这个示例能帮助你理解如何在BlinkScript中定义float3类型的变量并使用defineParam初始化。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"判断每一个星期五python","role":"me"},{"text":"要编写一个Python程序来判断每个星期五,你可以使用datetime模块来获取当前日期,并判断是否为星期五。下面是一个示例代码,演示如何判断当前日期是否为星期五:\n\n```python\nimport datetime\n\n# 获取当前日期\ncurrent_date = datetime.datetime.now()\n\n# 判断当前日期是否为星期五(星期五对应的weekday是4)\nif current_date.weekday() == 4:\n print(\"今天是星期五!\")\nelse:\n print(\"今天不是星期五。\")\n```\n\n在上面的示例中,我们首先导入datetime模块,然后使用datetime.datetime.now()获取当前日期和时间。接着,我们使用weekday()方法来获取当前日期对应的星期几,其中星期五对应的数字是4。最后,我们通过判断当前日期的weekday是否为4来确定是否为星期五,并输出相应的提示信息。\n\n你可以将以上代码保存为一个Python脚本文件(如check_friday.py),然后在命令行中运行该脚本,即可判断当前日期是否为星期五。希望这个示例能帮助你实现判断每个星期五的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"QTabBar的右键信号弹出菜单","role":"me"},{"text":"在使用Qt中的QTabBar控件时,如果想要实现右键信号弹出菜单的功能,可以通过以下步骤来实现:\n\n1. **创建自定义的TabBar类**:首先,你可以创建一个自定义的TabBar类,继承自QTabBar,并在该类中重新实现mousePressEvent事件,以捕获鼠标右键点击事件。\n\n2. **在mousePressEvent中处理右键点击事件**:在自定义的TabBar类中重新实现mousePressEvent事件,判断是否为鼠标右键点击事件,如果是,则弹出菜单。\n\n3. **创建弹出菜单**:在弹出菜单中添加需要的菜单项,并连接相应的槽函数。\n\n下面是一个简单的示例代码,演示如何实现QTabBar的右键信号弹出菜单功能:\n\n```python\nfrom PyQt5.QtWidgets import QTabBar, QMenu\n\nclass CustomTabBar(QTabBar):\n def __init__(self, parent=None):\n super(CustomTabBar, self).__init__(parent)\n\n def mousePressEvent(self, event):\n if event.button() == Qt.RightButton:\n menu = QMenu(self)\n menu.addAction(\"菜单项1\")\n menu.addAction(\"菜单项2\")\n menu.exec_(event.globalPos())\n\n super(CustomTabBar, self).mousePressEvent(event)\n\n# 在使用QTabWidget时,将TabBar设置为自定义的TabBar\ntabWidget = QTabWidget()\ntabWidget.setTabBar(CustomTabBar())\n```\n\n在上面的示例中,我们创建了一个CustomTabBar类,继承自QTabBar,并重新实现了mousePressEvent事件。在mousePressEvent事件中,我们判断是否为鼠标右键点击事件,如果是,则创建一个QMenu对象,并添加两个菜单项。最后,通过exec_方法在鼠标右键点击位置弹出菜单。\n\n请注意,以上示例是一个简单的演示代码,实际应用中可能需要根据具体需求进行修改和扩展。希望这个示例能帮助你实现QTabBar的右键信号弹出菜单功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"AttributeError: 'QTableWidget' object has no attribute 'setTabBar'","role":"me"},{"text":"对不起,我之前提供的示例代码中出现了错误。QTableWidget确实没有setTabBar方法,因为QTableWidget是用于显示表格数据的控件,而不是用于显示选项卡的控件。如果你想要在QTableWidget中实现右键信号弹出菜单的功能,可以通过以下步骤来实现:\n\n1. **创建自定义的TableWidget类**:首先,你可以创建一个自定义的TableWidget类,继承自QTableWidget,并在该类中重新实现contextMenuEvent事件,以捕获右键点击事件。\n\n2. 
**在contextMenuEvent中处理右键点击事件**:在自定义的TableWidget类中重新实现contextMenuEvent事件,创建并显示弹出菜单。\n\n下面是一个简单的示例代码,演示如何在QTableWidget中实现右键信号弹出菜单功能:\n\n```python\nfrom PyQt5.QtWidgets import QTableWidget, QMenu\n\nclass CustomTableWidget(QTableWidget):\n def __init__(self, parent=None):\n super(CustomTableWidget, self).__init__(parent)\n\n def contextMenuEvent(self, event):\n menu = QMenu(self)\n menu.addAction(\"菜单项1\")\n menu.addAction(\"菜单项2\")\n menu.exec_(event.globalPos())\n\n# 创建一个自定义的TableWidget对象\ntableWidget = CustomTableWidget()\n```\n\n在上面的示例中,我们创建了一个CustomTableWidget类,继承自QTableWidget,并重新实现了contextMenuEvent事件。在contextMenuEvent事件中,我们创建一个QMenu对象,并添加两个菜单项。最后,通过exec_方法在鼠标右键点击位置弹出菜单。\n\n希望这个示例能帮助你实现在QTableWidget中实现右键信号弹出菜单的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"QTableWidget获取正在选择的tabbar","role":"me"},{"text":"在Qt中,QTableWidget并不直接支持选项卡(tab)的功能,因此无法直接获取正在选择的选项卡。如果你想要在QTableWidget中实现类似选项卡的功能,并获取当前选择的选项卡,可以考虑使用QTabWidget和QTableWidget结合的方式来实现。\n\n下面是一个简单的示例代码,演示如何使用QTabWidget和QTableWidget结合,并获取当前选择的选项卡:\n\n```python\nfrom PyQt5.QtWidgets import QApplication, QTabWidget, QTableWidget, QTableWidgetItem\n\n# 创建QTabWidget和QTableWidget\ntabWidget = QTabWidget()\ntableWidget1 = QTableWidget()\ntableWidget2 = QTableWidget()\n\n# 将QTableWidget添加到QTabWidget中\ntabWidget.addTab(tableWidget1, \"Tab 1\")\ntabWidget.addTab(tableWidget2, \"Tab 2\")\n\n# 获取当前选择的选项卡索引\ncurrentTabIndex = tabWidget.currentIndex()\nprint(\"当前选择的选项卡索引:\", currentTabIndex)\n\n# 获取当前选择的选项卡名称\ncurrentTabName = tabWidget.tabText(currentTabIndex)\nprint(\"当前选择的选项卡名称:\", currentTabName)\n\n# 在当前选择的选项卡中添加一个表格项\ntableWidget = tabWidget.currentWidget()\ntableWidget.setItem(0, 0, QTableWidgetItem(\"Hello, World!\"))\n\n# 显示QTabWidget\ntabWidget.show()\n\n# 运行应用程序\napp = QApplication([])\napp.exec_()\n```\n\n在上面的示例中,我们创建了一个QTabWidget,并向其中添加了两个QTableWidget作为选项卡的内容。通过currentIndex()方法可以获取当前选择的选项卡索引,通过tabText()方法可以获取当前选择的选项卡名称。然后,我们在当前选择的选项卡中添加了一个表格项,并最后显示了QTabWidget。\n\n希望这个示例能帮助你实现在QTableWidget中获取当前选择的选项卡的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"点击tabbar的信号","role":"me"},{"text":"在Qt中,QTabBar控件本身并没有提供点击信号,但是你可以通过继承QTabBar并重新实现mousePressEvent事件来捕获点击事件。在捕获到点击事件后,你可以发送自定义的信号来实现类似点击信号的功能。\n\n下面是一个简单的示例代码,演示如何在QTabBar中捕获点击事件并发送自定义信号:\n\n```python\nfrom PyQt5.QtWidgets import QTabBar, QApplication\nfrom PyQt5.QtCore import pyqtSignal\n\nclass CustomTabBar(QTabBar):\n tabClicked = pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(CustomTabBar, self).__init__(parent)\n\n def mousePressEvent(self, event):\n if event.button() == Qt.LeftButton:\n index = self.tabAt(event.pos())\n if index != -1:\n self.tabClicked.emit(index)\n\n super(CustomTabBar, self).mousePressEvent(event)\n\n# 创建一个自定义的TabBar对象\ntabBar = CustomTabBar()\n\n# 连接自定义信号的槽函数\ndef onTabClicked(index):\n print(\"Tab clicked:\", index)\n\ntabBar.tabClicked.connect(onTabClicked)\n\n# 显示TabBar\napp = QApplication([])\ntabBar.show()\napp.exec_()\n```\n\n在上面的示例中,我们创建了一个CustomTabBar类,继承自QTabBar,并重新实现了mousePressEvent事件。在mousePressEvent事件中,我们判断是否为鼠标左键点击事件,并获取点击的选项卡索引,然后通过自定义的tabClicked信号发送该索引。最后,我们连接了tabClicked信号的槽函数onTabClicked,用于处理选项卡点击事件。\n\n希望这个示例能帮助你实现在QTabBar中捕获点击事件并发送自定义信号的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"QTabWidget点击tab的信号","role":"me"},{"text":"在Qt中,QTabWidget控件本身并没有提供点击选项卡的信号,但是你可以通过继承QTabWidget并重新实现tabBarClicked事件来捕获选项卡点击事件。在捕获到点击事件后,你可以发送自定义的信号来实现类似点击信号的功能。\n\n下面是一个简单的示例代码,演示如何在QTabWidget中捕获选项卡点击事件并发送自定义信号:\n\n```python\nfrom PyQt5.QtWidgets import QTabWidget, QApplication\nfrom PyQt5.QtCore import pyqtSignal\n\nclass CustomTabWidget(QTabWidget):\n tabClicked = 
pyqtSignal(int)\n\n def __init__(self, parent=None):\n super(CustomTabWidget, self).__init__(parent)\n\n self.tabBarClicked.connect(self.onTabBarClicked)\n\n def onTabBarClicked(self, index):\n self.tabClicked.emit(index)\n\n# 创建一个自定义的TabWidget对象\ntabWidget = CustomTabWidget()\n\n# 连接自定义信号的槽函数\ndef onTabClicked(index):\n print(\"Tab clicked:\", index)\n\ntabWidget.tabClicked.connect(onTabClicked)\n\n# 添加选项卡\ntabWidget.addTab(QLabel(\"Tab 1\"), \"Tab 1\")\ntabWidget.addTab(QLabel(\"Tab 2\"), \"Tab 2\")\n\n# 显示TabWidget\napp = QApplication([])\ntabWidget.show()\napp.exec_()\n```\n\n在上面的示例中,我们创建了一个CustomTabWidget类,继承自QTabWidget,并重新实现了onTabBarClicked方法来捕获选项卡点击事件。在onTabBarClicked方法中,我们发送自定义的tabClicked信号,并在连接的槽函数onTabClicked中处理选项卡点击事件。\n\n希望这个示例能帮助你实现在QTabWidget中捕获选项卡点击事件并发送自定义信号的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"qt保存一个文件","role":"me"},{"text":"在Qt中,你可以使用QFileDialog类来实现文件保存功能。QFileDialog提供了一个标准的对话框,用于选择文件保存的路径和文件名。下面是一个简单的示例代码,演示如何在Qt中保存一个文件:\n\n```python\nfrom PyQt5.QtWidgets import QApplication, QWidget, QPushButton, QFileDialog\n\nclass FileSaveExample(QWidget):\n def __init__(self):\n super().__init__()\n\n self.initUI()\n\n def initUI(self):\n self.setWindowTitle('文件保存示例')\n self.setGeometry(100, 100, 300, 200)\n\n saveButton = QPushButton('保存文件', self)\n saveButton.clicked.connect(self.saveFile)\n\n self.show()\n\n def saveFile(self):\n options = QFileDialog.Options()\n fileName, _ = QFileDialog.getSaveFileName(self, \"保存文件\", \"\", \"All Files (*);;Text Files (*.txt)\", options=options)\n if fileName:\n with open(fileName, 'w') as file:\n file.write(\"Hello, World!\")\n\nif __name__ == '__main__':\n app = QApplication([])\n ex = FileSaveExample()\n app.exec_()\n```\n\n在上面的示例中,我们创建了一个FileSaveExample类,继承自QWidget,并在窗口中添加了一个按钮用于保存文件。在saveFile方法中,我们使用QFileDialog.getSaveFileName方法来打开文件保存对话框,用户选择保存的文件路径和文件名后,我们将\"Hello, World!\"写入到文件中。\n\n运行示例代码后,点击按钮会弹出文件保存对话框,选择保存的文件路径和文件名后,点击保存按钮即可保存文件。\n\n希望这个示例能帮助你实现在Qt中保存一个文件的功能。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"遍历tablewidget的所有行和列","role":"me"},{"text":"在Qt中,可以使用QTableWidget的rowCount()和columnCount()方法来获取表格的行数和列数,然后通过循环遍历所有行和列。下面是一个简单的示例代码,演示如何遍历QTableWidget的所有行和列:\n\n```python\nfrom PyQt5.QtWidgets import QApplication, QTableWidget, QTableWidgetItem\n\n# 创建一个QTableWidget\ntableWidget = QTableWidget(3, 3)\n\n# 设置表头\ntableWidget.setHorizontalHeaderLabels(['Column 1', 'Column 2', 'Column 3'])\n\n# 填充表格数据\nfor row in range(tableWidget.rowCount()):\n for col in range(tableWidget.columnCount()):\n item = QTableWidgetItem(f'Row {row}, Col {col}')\n tableWidget.setItem(row, col, item)\n\n# 遍历所有行和列\nfor row in range(tableWidget.rowCount()):\n for col in range(tableWidget.columnCount()):\n item = tableWidget.item(row, col)\n if item is not None:\n print(f'Row {row}, Col {col}: {item.text()}')\n\n# 显示QTableWidget\ntableWidget.show()\n\n# 运行应用程序\napp = QApplication([])\napp.exec_()\n```\n\n在上面的示例中,我们创建了一个3x3的QTableWidget,并填充了表格数据。然后,通过两层循环遍历所有行和列,使用item()方法获取每个单元格的QTableWidgetItem对象,并打印出每个单元格的文本内容。\n\n运行示例代码后,你将看到控制台输出了所有行和列的内容。\n\n希望这个示例能帮助你遍历QTableWidget的所有行和列。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"python的rez环境","role":"me"},{"text":"Rez是一个用于构建、打包和管理Python和其他软件包的工具。它主要用于在复杂的软件项目中管理依赖关系和版本控制。Rez允许你定义软件包的依赖关系、版本和构建规则,并提供了一种统一的方式来管理这些软件包。\n\n在Rez环境中,你可以创建一个包含所需软件包的虚拟环境,以便在其中运行你的Python程序。这样可以确保你的程序在不同环境中具有一致的依赖关系和版本。\n\n以下是使用Rez创建和管理Python环境的基本步骤:\n\n1. 安装Rez:首先,你需要安装Rez工具。你可以通过pip安装Rez:\n\n```bash\npip install rez\n```\n\n2. 创建Rez环境:使用Rez create命令创建一个新的Rez环境。你可以指定所需的Python版本、依赖软件包等信息。\n\n```bash\nrez env my_environment python-3.7\n```\n\n3. 
激活Rez环境:使用Rez env命令激活你创建的Rez环境。\n\n```bash\nrez env my_environment\n```\n\n4. 在Rez环境中安装软件包:在激活的Rez环境中,你可以使用pip安装所需的Python软件包。\n\n```bash\npip install package_name\n```\n\n5. 运行Python程序:在激活的Rez环境中,你可以运行你的Python程序。\n\n```bash\npython your_program.py\n```\n\n通过使用Rez,你可以更好地管理Python程序的依赖关系和版本,确保程序在不同环境中的一致性。希望这些信息能帮助你了解Python的Rez环境。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"18:19:30 ERROR PackageFamilyNotFoundError: package family not found: my_environment (searched: C:\\Users\\ARTIST\\packages; C:\\Users\\ARTIST\\.rez\\packages\\int; C:\\Users\\ARTIST\\.rez\\packages\\ext)","role":"me"},{"text":"根据你提供的错误信息,看起来Rez无法找到名为\"my_environment\"的包系列(package family)。这可能是因为你尚未创建名为\"my_environment\"的Rez环墋。\n\n请确保在使用`rez env my_environment python-3.7`命令之前已经创建了名为\"my_environment\"的Rez环境。你可以按照以下步骤重新创建Rez环境:\n\n1. 创建Rez环境:使用`rez env`命令创建一个新的Rez环境,并指定所需的Python版本。\n\n```bash\nrez env my_environment python-3.7\n```\n\n2. 激活Rez环境:使用`rez env`命令激活你创建的Rez环境。\n\n```bash\nrez env my_environment\n```\n\n如果你已经按照上述步骤创建了Rez环境,但仍然遇到问题,请确保你在正确的目录中运行Rez命令,并且Rez环境的配置正确。你可以尝试在Rez环境的配置文件中检查是否有任何错误。\n\n如果问题仍然存在,请提供更多详细信息,以便我能够更好地帮助你解决问题。感谢你的理解。","role":"robot"},{"text":"bat隐藏终端使用python运行一个py文件","role":"me"},{"text":"要在Windows中使用批处理文件(.bat)隐藏终端并运行一个Python脚本(.py文件),你可以使用以下方法:\n\n1. 使用pyinstaller打包Python脚本为可执行文件:\n - 首先,你可以使用pyinstaller工具将Python脚本打包为一个独立的可执行文件(.exe文件),这样可以隐藏终端窗口并直接运行程序。\n - 安装pyinstaller:如果你尚未安装pyinstaller,可以使用以下命令安装:\n\n ```bash\n pip install pyinstaller\n ```\n\n - 打包Python脚本:使用以下命令将Python脚本打包为可执行文件:\n\n ```bash\n pyinstaller --onefile your_script.py\n ```\n\n - 这将在dist目录中生成一个可执行文件,你可以在批处理文件中调用该可执行文件。\n\n2. 创建批处理文件来运行可执行文件:\n - 创建一个批处理文件(.bat文件),并在其中调用刚刚生成的可执行文件。\n - 打开文本编辑器,输入以下内容:\n\n ```batch\n @echo off\n start /B your_script.exe\n ```\n\n - 将\"your_script.exe\"替换为你生成的可执行文件的名称。\n - 保存文件并将其扩展名更改为\".bat\",例如\"run_script.bat\"。\n\n3. 运行批处理文件:\n - 双击运行批处理文件,它将隐藏终端窗口并运行Python脚本。\n\n通过这种方法,你可以使用批处理文件隐藏终端并运行一个Python脚本。希望这对你有所帮助。如果有任何问题,请随时向我提问。","role":"robot"},{"text":"我不想使用pyinstaller","role":"me"},{"text":"如果你不想使用pyinstaller来打包Python脚本为可执行文件,你仍然可以通过其他方式在Windows中隐藏终端并运行Python脚本。以下是一种方法:\n\n1. 创建一个批处理文件(.bat文件)来运行Python脚本:\n - 打开文本编辑器,输入以下内容:\n\n ```batch\n @echo off\n pythonw your_script.py\n ```\n\n - 将\"your_script.py\"替换为你要运行的Python脚本的文件名。\n - 保存文件并将其扩展名更改为\".bat\",例如\"run_script.bat\"。\n\n2. 运行批处理文件:\n - 双击运行批处理文件,它将使用pythonw命令来运行Python脚本,并隐藏终端窗口。\n\n在这种方法中,我们使用了pythonw命令来运行Python脚本,它与python命令不同,pythonw会在后台运行而不显示终端窗口。这样可以实现隐藏终端并运行Python脚本的效果。\n\n希望这种方法符合你的需求。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"皮克斯的usd文件也可以理解成一个可执行脚本?","role":"me"},{"text":"USD(Universal Scene Description)是一种用于描述和交换3D场景数据的开放标准。USD文件通常包含了场景中的几何、材质、动画等信息,以及场景的层次结构和其他元数据。在皮克斯的制作流程中,USD文件通常用于存储和交换复杂的3D场景数据。\n\nUSD文件本身并不是一个可执行脚本,而是一种数据格式,用于存储和传输3D场景数据。然而,USD文件可以被各种3D建模和渲染软件解析和处理,以便在这些软件中打开和编辑场景数据。\n\n在一些特定的工作流程中,可以编写脚本来处理USD文件,例如自动化导入导出、数据转换、场景分析等。这些脚本可以使用USD软件包提供的API或命令行工具来操作USD文件。\n\n总的来说,USD文件本身不是一个可执行脚本,但可以通过编写脚本来处理和操作USD文件。希望这能回答你的问题。如果有任何疑问,请随时告诉我。","role":"robot"},{"text":"POST /v1/files/downloadURL HTTP/1.1\nHost: api.example.com\nAuthorization: Bearer {user_token}\n\n{\n \"identifier\": \"file_123\"\n}\n如何用requests实现这个的post","role":"me"},{"text":"要使用Python的requests库发送上述POST请求,你可以按照以下步骤进行:\n\n1. 首先,确保你已经安装了requests库。如果没有安装,可以使用以下命令进行安装:\n\n```bash\npip install requests\n```\n\n2. 
接下来,你可以使用以下Python代码来发送POST请求:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/files/downloadURL'\nheaders = {\n 'Authorization': 'Bearer {user_token}',\n 'Content-Type': 'application/json'\n}\ndata = {\n 'identifier': 'file_123'\n}\n\nresponse = requests.post(url, headers=headers, json=data)\n\nif response.status_code == 200:\n print('POST request successful')\n print('Response:', response.json())\nelse:\n print('POST request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们指定了POST请求的URL、请求头(包括Authorization头)、请求数据(JSON格式)。然后使用requests.post()方法发送POST请求,并检查响应的状态码和内容。\n\n请记得将`{user_token}`替换为实际的用户令牌。此外,根据实际情况修改URL、请求头和请求数据。\n\n通过这段代码,你可以使用requests库发送包含Authorization头和JSON数据的POST请求。希望这对你有所帮助。如果有任何问题,请随时告诉我。","role":"robot"},{"text":"requests.exceptions.JSONDecodeError: Expecting value: line 1 column 1 (char 0)","role":"me"},{"text":"这个错误通常表示requests库在尝试解析响应内容时遇到了问题,可能是因为响应内容不是有效的JSON格式。这可能是由于服务器返回的响应不是JSON格式,或者在请求中出现了一些问题。\n\n为了解决这个问题,你可以尝试以下方法:\n\n1. 检查服务器响应:\n - 可以使用`response.text`输出响应内容,查看服务器返回的实际内容是什么。\n - 确保服务器返回的内容是有效的JSON格式,如果不是JSON格式,可能需要根据实际情况调整代码来处理不同的响应类型。\n\n2. 调整代码处理非JSON响应:\n - 如果服务器返回的内容不是JSON格式,你可以尝试使用`response.text`或`response.content`来获取响应内容,而不是尝试解析为JSON。\n - 根据实际情况,可以根据响应内容的类型来决定如何处理响应。\n\n以下是一个示例代码,可以根据响应内容的类型来处理:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/files/downloadURL'\nheaders = {\n 'Authorization': 'Bearer {user_token}',\n 'Content-Type': 'application/json'\n}\ndata = {\n 'identifier': 'file_123'\n}\n\nresponse = requests.post(url, headers=headers, json=data)\n\nif response.status_code == 200:\n try:\n json_response = response.json()\n print('POST request successful')\n print('Response:', json_response)\n except ValueError:\n print('Non-JSON response:')\n print('Response:', response.text)\nelse:\n print('POST request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n通过以上代码,我们尝试解析响应为JSON格式,如果解析失败,则输出响应内容。这样可以更好地处理不同类型的响应内容。\n\n希望这些信息对你有所帮助。如果问题仍然存在或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"PUT {upload_url} HTTP/1.1\nHost: upload.example.com\nX-Amz-Meta-Userid: 12345\nX-Amz-Meta-Ctime: 1622552449\nX-Amz-Meta-Mtime: 1622552449\n\n{binary data of the file}\n{binary data of the file}的意思","role":"me"},{"text":"在你提供的PUT请求示例中,`{binary data of the file}`表示文件的二进制数据。在HTTP请求中,如果需要上传文件或二进制数据,通常会将文件内容以二进制形式包含在请求体中。\n\n对于PUT请求,通常用于上传文件或数据到服务器。在这种情况下,`{binary data of the file}`指的是要上传的文件的二进制数据。这些二进制数据将作为请求体的一部分发送到指定的`{upload_url}`。\n\n在Python中,你可以使用requests库来发送包含文件二进制数据的PUT请求。以下是一个示例代码:\n\n```python\nimport requests\n\nupload_url = 'http://upload.example.com'\nheaders = {\n 'X-Amz-Meta-Userid': '12345',\n 'X-Amz-Meta-Ctime': '1622552449',\n 'X-Amz-Meta-Mtime': '1622552449'\n}\n\nwith open('file_to_upload.bin', 'rb') as file:\n binary_data = file.read()\n\nresponse = requests.put(upload_url, headers=headers, data=binary_data)\n\nif response.status_code == 200:\n print('PUT request successful')\nelse:\n print('PUT request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们打开一个二进制文件`file_to_upload.bin`,读取文件的二进制数据,并将其作为请求体发送到指定的`upload_url`。请确保将`file_to_upload.bin`替换为实际要上传的文件。\n\n通过这段代码,你可以使用requests库发送包含文件二进制数据的PUT请求。希望这对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"Traceback (most recent call last):\n File \"D:\\TD\\Key\\gitea\\in2_api_demo\\demo.py\", line 192, in \n pprint(create_shot(shot_data=data))\n File \"D:\\TD\\Key\\gitea\\in2_api_demo\\demo.py\", line 69, in 
create_shot\n return requests.post(\"http://%s:%s/v1/shots/create\" % (IP, PORT), headers={\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\api.py\", line 115, in post\n return request(\"post\", url, data=data, json=json, **kwargs)\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\api.py\", line 59, in request\n return session.request(method=method, url=url, **kwargs)\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\sessions.py\", line 573, in request\n prep = self.prepare_request(req)\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\sessions.py\", line 484, in prepare_request\n p.prepare(\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\models.py\", line 371, in prepare\n self.prepare_body(data, files, json)\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\models.py\", line 559, in prepare_body\n body = self._encode_params(data)\n File \"C:\\Users\\ARTIST\\Desktop\\nukepipeline\\venv\\venv39\\lib\\site-packages\\requests\\models.py\", line 121, in _encode_params\n for k, vs in to_key_val_list(data):\nValueError: too many values to unpack (expected 2)","role":"me"},{"text":"根据你提供的错误信息,看起来是在准备请求体时出现了问题,导致了`ValueError: too many values to unpack (expected 2)`错误。这通常是由于`data`参数传递的格式不正确导致的。\n\n在你的代码中,可能是`data`参数的格式不符合`requests.post()`方法的要求,导致了这个错误。为了解决这个问题,你可以尝试以下方法:\n\n1. 检查`data`参数的格式:\n - 确保`data`参数是一个字典类型,其中包含要发送的数据。\n - 如果`data`参数包含的是键值对数据,可以将其转换为字典类型。\n\n2. 确保`data`参数的格式正确:\n - 确保`data`参数的格式符合`requests.post()`方法的要求,通常是一个字典类型。\n\n下面是一个示例代码,演示了如何正确使用`data`参数发送POST请求:\n\n```python\nimport requests\n\nIP = 'api.example.com'\nPORT = '8080'\ndata = {\n 'key1': 'value1',\n 'key2': 'value2'\n}\n\nresponse = requests.post(\"http://%s:%s/v1/shots/create\" % (IP, PORT), headers={'Content-Type': 'application/json'}, json=data)\n\nif response.status_code == 200:\n print('POST request successful')\n print('Response:', response.json())\nelse:\n print('POST request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们将要发送的数据存储在一个字典`data`中,并使用`json=data`参数将其作为JSON数据发送到POST请求中。请根据你的实际情况调整数据格式和请求参数。\n\n希望这些信息对你有所帮助。如果问题仍然存在或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"post一个列表数据","role":"me"},{"text":"要发送一个包含列表数据的POST请求,你可以将列表作为JSON数据发送到请求体中。以下是一个示例代码,演示了如何使用requests库发送包含列表数据的POST请求:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/data'\ndata = {\n 'list_data': [1, 2, 3, 4, 5]\n}\n\nresponse = requests.post(url, json=data)\n\nif response.status_code == 200:\n print('POST request successful')\n print('Response:', response.json())\nelse:\n print('POST request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们将一个包含整数的列表作为JSON数据存储在字典`data`中,并使用`json=data`参数将其作为JSON数据发送到POST请求中。当服务器收到请求后,可以解析JSON数据并获取列表数据。\n\n你可以根据实际情况修改数据内容和请求URL。希望这个示例对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"[\n {\n \"shotNumber\": \"SHOT001\",\n \"shotGroup\": \"Group1\",\n \"previousShot\": \"SHOT000\"\n },\n {\n \"shotNumber\": \"SHOT002\",\n \"shotGroup\": \"Group1\",\n \"previousShot\": \"SHOT001\"\n }\n ]\n这种数据怎么post","role":"me"},{"text":"要POST发送包含多个字典的列表数据,你可以将整个列表作为JSON数据发送到请求体中。以下是一个示例代码,演示了如何使用requests库发送包含多个字典的列表数据的POST请求:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/shots/create'\ndata = [\n {\n 
\"shotNumber\": \"SHOT001\",\n \"shotGroup\": \"Group1\",\n \"previousShot\": \"SHOT000\"\n },\n {\n \"shotNumber\": \"SHOT002\",\n \"shotGroup\": \"Group1\",\n \"previousShot\": \"SHOT001\"\n }\n]\n\nresponse = requests.post(url, json=data)\n\nif response.status_code == 200:\n print('POST request successful')\n print('Response:', response.json())\nelse:\n print('POST request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们将包含多个字典的列表数据存储在`data`变量中,并使用`json=data`参数将其作为JSON数据发送到POST请求中。当服务器收到请求后,可以解析JSON数据并获取每个字典的数据。\n\n你可以根据实际情况修改数据内容和请求URL。希望这个示例对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"request DELETE请求","role":"me"},{"text":"要发送DELETE请求,你可以使用requests库中的`requests.delete()`方法。DELETE请求通常用于删除资源或数据。以下是一个示例代码,演示了如何使用requests库发送DELETE请求:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/resource/123' # 指定要删除的资源URL\n\nresponse = requests.delete(url)\n\nif response.status_code == 200:\n print('DELETE request successful')\n print('Resource deleted successfully')\nelse:\n print('DELETE request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们指定了要删除的资源的URL,并使用`requests.delete()`方法发送DELETE请求。如果服务器成功删除资源,将返回状态码200。你可以根据实际情况修改资源URL和处理逻辑。\n\n希望这个示例对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"request DELETE请求怎么传入数据","role":"me"},{"text":"在HTTP协议中,DELETE请求通常不允许在请求体中传递数据,因为DELETE请求的主要目的是删除资源,而不是传递数据。如果你需要在DELETE请求中传递数据,可以考虑将数据放在URL的查询参数中或者在请求头中传递。\n\n以下是两种常见的方法来在DELETE请求中传递数据:\n\n1. 将数据放在URL的查询参数中:\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/resource/123?param1=value1¶m2=value2' # 将数据作为查询参数传递\nresponse = requests.delete(url)\n\nif response.status_code == 200:\n print('DELETE request successful')\n print('Resource deleted successfully')\nelse:\n print('DELETE request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n2. 
在请求头中传递数据:\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/resource/123' # 指定要删除的资源URL\nheaders = {'Content-Type': 'application/json'}\ndata = {'key': 'value'} # 要传递的数据\n\nresponse = requests.delete(url, headers=headers, json=data)\n\nif response.status_code == 200:\n print('DELETE request successful')\n print('Resource deleted successfully')\nelse:\n print('DELETE request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在第二种方法中,我们将要传递的数据放在请求头中,并使用`json=data`参数将其作为JSON数据发送到DELETE请求中。\n\n请注意,虽然可以在DELETE请求中传递数据,但这并不是HTTP协议的标准做法。如果可能,建议考虑使用其他HTTP方法(如POST或PUT)来传递数据。\n\n希望这些信息对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"PATCH请求","role":"me"},{"text":"PATCH请求是HTTP协议中的一种方法,用于部分更新资源。与PUT请求不同,PATCH请求允许客户端发送部分数据更新到服务器,而不需要发送整个资源的完整表示。\n\n要发送PATCH请求,你可以使用requests库中的`requests.patch()`方法。以下是一个示例代码,演示了如何使用requests库发送PATCH请求:\n\n```python\nimport requests\n\nurl = 'http://api.example.com/v1/resource/123' # 指定要更新的资源URL\ndata = {\n 'key1': 'value1',\n 'key2': 'value2'\n}\n\nresponse = requests.patch(url, json=data)\n\nif response.status_code == 200:\n print('PATCH request successful')\n print('Resource updated successfully')\nelse:\n print('PATCH request failed')\n print('Status code:', response.status_code)\n print('Response:', response.text)\n```\n\n在上面的代码中,我们指定了要更新的资源的URL,并使用`requests.patch()`方法发送PATCH请求。我们将要更新的部分数据存储在`data`变量中,并使用`json=data`参数将其作为JSON数据发送到PATCH请求中。\n\n希望这个示例对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"401错误码","role":"me"},{"text":"HTTP状态码401表示未经授权,需要进行身份验证才能访问资源。当服务器返回401状态码时,表示客户端请求需要提供有效的身份验证信息(如用户名和密码、令牌等)才能继续访问资源。\n\n如果你收到401错误码,可以尝试以下几种方法来解决问题:\n\n1. 提供有效的身份验证信息:检查请求是否包含正确的身份验证信息。根据API文档或服务提供商的要求,提供正确的用户名、密码、令牌等信息。\n\n2. 检查身份验证方式:确保使用正确的身份验证方式。有些API可能要求使用基本身份验证(Basic Authentication),而有些可能要求使用令牌(Token)进行身份验证。\n\n3. 检查权限和访问控制:确保你有权限访问所请求的资源。有些资源可能需要特定的权限或角色才能访问。\n\n4. 刷新令牌:如果使用令牌进行身份验证,尝试刷新令牌并重新发送请求。\n\n5. 检查请求头:确保请求头中包含正确的身份验证信息。有些API要求将身份验证信息放在特定的请求头中。\n\n如果以上方法都无法解决问题,建议查看API文档或联系服务提供商以获取更多帮助和支持。\n\n希望这些信息对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"400错误码","role":"me"},{"text":"HTTP状态码400表示客户端发送的请求有错误,服务器无法处理该请求。通常情况下,400错误码表示请求语法有误或请求参数不正确,导致服务器无法理解或处理请求。\n\n如果你收到400错误码,可以尝试以下几种方法来解决问题:\n\n1. 检查请求参数:确保请求参数的格式和内容符合API的要求。检查请求中的参数是否正确、完整,并且符合API文档中的规范。\n\n2. 验证请求语法:检查请求的语法是否正确,包括请求方法、请求头、请求体等。确保请求是符合HTTP协议规范的。\n\n3. 使用合适的HTTP方法:确保使用正确的HTTP方法来发送请求。不同的操作可能需要使用不同的HTTP方法(如GET、POST、PUT、DELETE等)。\n\n4. 检查URL格式:确保请求的URL格式正确,包括协议、域名、路径等部分。\n\n5. 
查看服务器错误信息:服务器可能会返回更详细的错误信息,可以查看服务器返回的响应体中的错误信息,以便更好地理解问题所在。\n\n如果以上方法都无法解决问题,建议查看API文档或联系服务提供商以获取更多帮助和支持。\n\n希望这些信息对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"requests通过url下载文件","role":"me"},{"text":"要使用Python的requests库通过URL下载文件,你可以发送一个GET请求来获取文件内容,并将其保存到本地文件中。以下是一个示例代码,演示了如何使用requests库下载文件:\n\n```python\nimport requests\n\nurl = 'http://example.com/file.zip' # 指定要下载的文件URL\nfile_path = 'downloaded_file.zip' # 指定保存文件的本地路径\n\nresponse = requests.get(url)\n\nif response.status_code == 200:\n with open(file_path, 'wb') as file:\n file.write(response.content)\n print('文件下载成功,保存在:', file_path)\nelse:\n print('文件下载失败')\n print('状态码:', response.status_code)\n print('响应内容:', response.text)\n```\n\n在上面的代码中,我们指定了要下载的文件的URL,并发送一个GET请求来获取文件内容。如果请求成功(状态码为200),我们将文件内容写入本地文件中,并保存在指定的路径下。\n\n请注意,`response.content`包含了文件的二进制内容,我们使用`'wb'`模式打开文件以写入二进制内容。\n\n你可以根据需要修改文件的保存路径和文件名。确保文件路径是可写的,并且有足够的空间来保存文件。\n\n希望这个示例对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"def download(url, file_path):\n # 第一次请求是为了得到文件总大小\n r1 = requests.get(url, stream=True, verify=False)\n total_size = int(r1.headers['Content-Length'])\n\n # 这重要了,先看看本地文件下载了多少\n if os.path.exists(file_path):\n temp_size = os.path.getsize(file_path) # 本地已经下载的文件大小\n else:\n temp_size = 0\n # 显示一下下载了多少\n print(temp_size)\n print(total_size)\n # 核心部分,这个是请求下载时,从本地文件已经下载过的后面下载\n headers = {'Range': 'bytes=%d-' % temp_size}\n # 重新请求网址,加入新的请求头的\n r = requests.get(url, stream=True, verify=False, headers=headers)\n\n # 下面写入文件也要注意,看到\"ab\"了吗?\n # \"ab\"表示追加形式写入文件\n with open(file_path, \"ab\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n temp_size += len(chunk)\n f.write(chunk)\n f.flush()\n\n ###这是下载实现进度显示####\n done = int(50 * temp_size / total_size)\n sys.stdout.write(\"\\r[%s%s] %d%%\" % ('█' * done, ' ' * (50 - done), 100 * temp_size / total_size))\n sys.stdout.flush()\n print() # 避免上面\\r 回车符\n\n这个能否加入网速显示","role":"me"},{"text":"要在下载文件的过程中显示下载速度,你可以在代码中添加计算下载速度的逻辑。以下是修改后的代码,包含了计算下载速度并显示下载进度的功能:\n\n```python\nimport requests\nimport os\nimport sys\nimport time\n\ndef download(url, file_path):\n # 第一次请求是为了得到文件总大小\n r1 = requests.get(url, stream=True, verify=False)\n total_size = int(r1.headers['Content-Length'])\n\n # 这重要了,先看看本地文件下载了多少\n if os.path.exists(file_path):\n temp_size = os.path.getsize(file_path) # 本地已经下载的文件大小\n else:\n temp_size = 0\n # 显示一下下载了多少\n print(temp_size)\n print(total_size)\n # 核心部分,这个是请求下载时,从本地文件已经下载过的后面下载\n headers = {'Range': 'bytes=%d-' % temp_size}\n # 重新请求网址,加入新的请求头的\n r = requests.get(url, stream=True, verify=False, headers=headers)\n\n start_time = time.time() # 记录下载开始时间\n with open(file_path, \"ab\") as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n temp_size += len(chunk)\n f.write(chunk)\n f.flush()\n\n # 计算下载速度\n elapsed_time = time.time() - start_time\n download_speed = temp_size / 1024 / elapsed_time # 单位为KB/s\n\n # 显示下载进度和速度\n done = int(50 * temp_size / total_size)\n sys.stdout.write(\"\\r[%s%s] %d%% 下载速度: %.2f KB/s\" % ('█' * done, ' ' * (50 - done), 100 * temp_size / total_size, download_speed))\n sys.stdout.flush()\n print() # 避免上面\\r 回车符\n\n# 调用下载函数\nurl = 'http://example.com/file.zip'\nfile_path = 'downloaded_file.zip'\ndownload(url, file_path)\n```\n\n在上面的代码中,我们添加了计算下载速度的逻辑,并在下载进度显示中显示下载速度(单位为KB/s)。下载速度的计算是根据已下载的文件大小和已经经过的时间来计算的。\n\n希望这个修改后的代码对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"能否修改成多线程","role":"me"},{"text":"当涉及到文件下载时,多线程可以提高下载速度,因为可以同时从服务器下载文件的不同部分。以下是一个修改后的代码示例,使用多线程来下载文件并显示下载进度和速度:\n\n```python\nimport requests\nimport os\nimport 
sys\nimport threading\nimport time\n\ndef download_chunk(url, file_path, start_byte, end_byte):\n headers = {'Range': 'bytes=%d-%d' % (start_byte, end_byte)}\n r = requests.get(url, headers=headers, stream=True, verify=False)\n with open(file_path, \"r+b\") as f:\n f.seek(start_byte)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n\ndef download(url, file_path, num_threads=4):\n r1 = requests.head(url, verify=False)\n total_size = int(r1.headers['Content-Length'])\n\n if os.path.exists(file_path):\n temp_size = os.path.getsize(file_path)\n else:\n temp_size = 0\n\n with open(file_path, \"ab\") as f:\n f.truncate(total_size)\n\n chunk_size = total_size // num_threads\n threads = []\n start_time = time.time()\n\n for i in range(num_threads):\n start_byte = i * chunk_size + temp_size\n end_byte = total_size if i == num_threads - 1 else (i + 1) * chunk_size - 1\n thread = threading.Thread(target=download_chunk, args=(url, file_path, start_byte, end_byte))\n threads.append(thread)\n thread.start()\n\n for thread in threads:\n thread.join()\n\n elapsed_time = time.time() - start_time\n download_speed = total_size / 1024 / elapsed_time\n\n print(\"\\n下载完成,下载速度: %.2f KB/s\" % download_speed)\n\n# 调用多线程下载函数\nurl = 'http://example.com/file.zip'\nfile_path = 'downloaded_file.zip'\ndownload(url, file_path, num_threads=4)\n```\n\n在上面的代码中,我们定义了一个`download_chunk`函数来下载文件的指定部分,并使用多线程来同时下载文件的不同部分。`download`函数将文件分成多个部分,每个部分由一个线程来下载。下载速度的计算仍然是根据已下载的文件大小和已经经过的时间来计算的。\n\n请注意,多线程下载可能会受到服务器限制或网络环境的影响,因此请根据实际情况调整线程数量以获得最佳性能。\n\n希望这个修改后的多线程下载代码对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"能否加上下载进度和速度显示","role":"me"},{"text":"当使用多线程下载文件时,要显示下载进度和速度,需要在每个线程中更新下载进度和速度,并在主线程中计算并显示整体下载进度和速度。以下是修改后的代码示例,包含了下载进度和速度的显示:\n\n```python\nimport requests\nimport os\nimport sys\nimport threading\nimport time\n\ndef download_chunk(url, file_path, start_byte, end_byte, progress_dict):\n headers = {'Range': 'bytes=%d-%d' % (start_byte, end_byte)}\n r = requests.get(url, headers=headers, stream=True, verify=False)\n with open(file_path, \"r+b\") as f:\n f.seek(start_byte)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n progress_dict['downloaded'] += len(chunk)\n\ndef download(url, file_path, num_threads=4):\n r1 = requests.head(url, verify=False)\n total_size = int(r1.headers['Content-Length'])\n\n if os.path.exists(file_path):\n temp_size = os.path.getsize(file_path)\n else:\n temp_size = 0\n\n with open(file_path, \"ab\") as f:\n f.truncate(total_size)\n\n chunk_size = total_size // num_threads\n threads = []\n progress_dict = {'downloaded': temp_size}\n start_time = time.time()\n\n for i in range(num_threads):\n start_byte = i * chunk_size + temp_size\n end_byte = total_size if i == num_threads - 1 else (i + 1) * chunk_size - 1\n thread = threading.Thread(target=download_chunk, args=(url, file_path, start_byte, end_byte, progress_dict))\n threads.append(thread)\n thread.start()\n\n while any(thread.is_alive() for thread in threads):\n downloaded = progress_dict['downloaded']\n done = int(50 * downloaded / total_size)\n download_speed = downloaded / 1024 / (time.time() - start_time)\n sys.stdout.write(\"\\r[%s%s] %d%% 下载速度: %.2f KB/s\" % ('█' * done, ' ' * (50 - done), 100 * downloaded / total_size, download_speed))\n sys.stdout.flush()\n time.sleep(0.1)\n\n elapsed_time = time.time() - start_time\n download_speed = total_size / 1024 / elapsed_time\n\n print(\"\\n下载完成,下载速度: %.2f KB/s\" % download_speed)\n\n# 调用多线程下载函数\nurl = 
'http://example.com/file.zip'\nfile_path = 'downloaded_file.zip'\ndownload(url, file_path, num_threads=4)\n```\n\n在上面的代码中,我们在每个线程中更新下载进度,并在主线程中计算并显示整体下载进度和速度。下载进度的显示是根据已下载的文件大小和总文件大小来计算的。\n\n希望这个修改后的代码对你有所帮助。如果有任何问题或需要进一步帮助,请随时告诉我。","role":"robot"},{"text":"ZeroDivisionError: division by zero","role":"me"},{"text":"出现`ZeroDivisionError: division by zero`错误通常是因为在计算下载速度时,分母为零导致的。这可能是因为在计算下载速度时,已经经过的时间为零,导致除以零的错误。为了避免这个错误,我们可以在计算下载速度之前检查已经经过的时间是否大于零。以下是修改后的代码示例,修复了这个错误:\n\n```python\nimport requests\nimport os\nimport sys\nimport threading\nimport time\n\ndef download_chunk(url, file_path, start_byte, end_byte, progress_dict):\n headers = {'Range': 'bytes=%d-%d' % (start_byte, end_byte)}\n r = requests.get(url, headers=headers, stream=True, verify=False)\n with open(file_path, \"r+b\") as f:\n f.seek(start_byte)\n for chunk in r.iter_content(chunk_size=1024):\n if chunk:\n f.write(chunk)\n progress_dict['downloaded'] += len(chunk)\n\ndef download(url, file_path, num_threads=4):\n r1 = requests.head(url, verify=False)\n total_size = int(r1.headers['Content-Length'])\n\n if os.path.exists(file_path):\n temp_size = os.path.getsize(file_path)\n else:\n temp_size = 0\n\n with open(file_path, \"ab\") as f:\n f.truncate(total_size)\n\n chunk_size = total_size // num_threads\n threads = []\n progress_dict = {'downloaded': temp_size}\n start_time = time.time()\n\n for i in range(num_threads):\n start_byte = i * chunk_size + temp_size\n end_byte = total_size if i == num_threads - 1 else (i + 1) * chunk_size - 1\n thread = threading.Thread(target=download_chunk, args=(url, file_path, start_byte, end_byte, progress_dict))\n threads.append(thread)\n thread.start()\n\n while any(thread.is_alive() for thread in threads):\n downloaded = progress_dict['downloaded']\n done = int(50 * downloaded / total_size)\n elapsed_time = time.time() - start_time\n download_speed = downloaded / 1024 / elapsed_time if elapsed_time > 0 else 0\n sys.stdout.write(\"\\r[%s%s] %d%% 下载速度: %.2f KB/s\" % ('█' * done, ' ' * (50 - done), 100 * downloaded / total_size, download_speed))\n sys.stdout.flush()\n time.sleep(0.1)\n\n download_speed = total_size / 1024 / elapsed_time if elapsed_time > 0 else 0\n\n print(\"\\n下载完成,下载速度: %.2f KB/s\" % download_speed)\n\n# 调用多线程下载函数\nurl = 'http://example.com/file.zip'\nfile_path = 'downloaded_file.zip'\ndownload(url, file_path, num_threads=4)\n```\n\n在上面的代码中,我们在计算下载速度之前添加了一个条件判断,确保已经经过的时间大于零。这样可以避免出现`ZeroDivisionError`错误。\n\n希望这个修改后的代码能够解决问题。如果还有其他问题或需要进一步帮助,请随时告诉我。","role":"robot"}],"id":1708601080458}]} \ No newline at end of file diff --git a/java环境地址.txt b/java环境地址.txt new file mode 100644 index 0000000..0c266a5 --- /dev/null +++ b/java环境地址.txt @@ -0,0 +1,2 @@ +https://mvnrepository.com/artifact/mysql/mysql-connector-java/8.0.28 +https://repo.huaweicloud.com/java/jdk/8u151-b12/ \ No newline at end of file diff --git a/linux安装微信小程序开发环境.txt b/linux安装微信小程序开发环境.txt new file mode 100644 index 0000000..1a17248 --- /dev/null +++ b/linux安装微信小程序开发环境.txt @@ -0,0 +1,7 @@ +cd 桌面 +git clone https://github.com/cytle/wechat_web_devtools.git +cd wechat_web_devtools +cd bin +./wxdt install +#启动 +./wxdt \ No newline at end of file diff --git a/mysql语法笔记.txt b/mysql语法笔记.txt new file mode 100644 index 0000000..2d3cd69 --- /dev/null +++ b/mysql语法笔记.txt @@ -0,0 +1,19 @@ +-- 创建表 书籍表 +CREATE TABLE t_book( + id INT, + name VARCHAR(30), + author VARCHAR(10), + price DOUBLE +) + +select * from t_book; + +insert into t_book(id, name, author, price) values (1, "项目驱动零起点学java", "马士兵、赵珊珊", 69.8); +insert into t_book(id, 
name, author, price) values (2, "活着", "余华", 45); + +DELETE FROM t_book WHERE id=2; + +UPDATE t_book set price=36.6 WHERE id=1; + +SELECT name, author FROM t_book; +SELECT * FROM t_book WHERE price > 40; diff --git a/openMaya开发.txt b/openMaya开发.txt new file mode 100644 index 0000000..5cc6d90 --- /dev/null +++ b/openMaya开发.txt @@ -0,0 +1,5 @@ +浅谈OpenMaya的使用 7:配置VisualStudio Maya C++ 插件开发 - 知乎 + +https://zhuanlan.zhihu.com/p/616220191 + + diff --git a/postgresql安装错误.txt b/postgresql安装错误.txt new file mode 100644 index 0000000..f634682 --- /dev/null +++ b/postgresql安装错误.txt @@ -0,0 +1,41 @@ +  问题:sudo service postgresql start 启动后 发现连不上数据库,psql 和msfconsole都提示: + +could not connect to database template1: could not connect to server: No such file or directory. + +Is the server running locally and accepting + +connections on Unix domain socket "var/run/postgresql/.s.PGSQL.5432"? + +发现并没有监听默认5432端口。 + +多次尝试后发现,并没有初始化数据库,现记录下初始化并启动过程(postgresql安装完毕): + +1.给数据库的postgres用户设置密码: + +$ sudo passwd postgres + + +2.创建postgresql的data文件夹 + +$ sudo mkdir /data +$ sudo chmod o+w /data +$ su - postgres +$ mkdir /data/postgresql +$ mkdir /data/postgresql/data +3.postgres用户初始化数据库 + +$ /usr/lib/postgresql/12/bin/initdb -D /data/postgresql/data +初始化看到几个OK之后,最后Success,然后提示启动数据库命令:pg_ctl ... + + + +4.postgres用户启动数据库 + +/usr/lib/postgresql/12/bin/pg_ctl -D /data/postgresql/data -l logfile start +waiting for server to start .... done + +server started + +5.检查是否监听端口 + +$ ss -ant \ No newline at end of file diff --git a/python多进程.txt b/python多进程.txt new file mode 100644 index 0000000..d729d28 --- /dev/null +++ b/python多进程.txt @@ -0,0 +1,94 @@ + +import asyncio +import multiprocessing +import os +import time +from multiprocessing import Manager + + +# 业务类 +class BaiJiaHao(): + + async def get_author(self, rec): + """ + 协程代码 + """ + print('enter get author,wait for: %d' % rec['num']) + # 模拟IO操作,耗时根据传进来的num决定 + await asyncio.sleep(rec['num']) + # 返回协程任务完成后的结果 + return rec + + + def run(self): + # 假定我们有11个任务要跑,每个任务耗时为num秒,串行的话需要43秒。 + # 但我们这个demo跑完只需要这些任务中的最大值:8秒 + list = [{'title': 'title1', 'num': 2}, + {'title': 'title2', 'num': 1}, + {'title': 'title3', 'num': 3}, + {'title': 'title4', 'num': 8}, + {'title': 'title5', 'num': 2}, + {'title': 'title6', 'num': 5}, + {'title': 'title7', 'num': 7}, + {'title': 'title8', 'num': 3}, + {'title': 'title9', 'num': 4}, + {'title': 'title10', 'num': 3}, + {'title': 'title11', 'num': 5}, + ] + result = run_get_author_in_multi_process(list) + print('result', result) + + +def get_chunks(iterable, chunks=1): + """ + 此函数用于分割若干任务到不同的进程里去 + """ + lst = list(iterable) + return [lst[i::chunks] for i in range(chunks)] + + +def run_get_author(lists, queue): + """ + 这个就是子进程运行的函数,接收任务列表和用于进程间通讯的Queue + """ + print('exec run_get_author.child process id : %s, parent process id : %s' % (os.getpid(), os.getppid())) + # 每个子进程分配一个新的loop + loop = asyncio.new_event_loop() + # 初始化业务类,转成task或future + spider = BaiJiaHao() + tasks = [loop.create_task(spider.get_author(rec)) for rec in lists] + # 协程走起 + loop.run_until_complete(asyncio.wait(tasks)) + # 往queue写入每个任务的结果 + for task in tasks: + queue.put(task.result()) + + +def run_get_author_in_multi_process(task_lists): + """ + 父进程函数,主要是分割任务并初始化进程池,启动进程并返回结果 + """ + # process_count = len(tasks) % 2 + # 进程数这里我用机器上的核心数,注意:未考虑核心数比任务多的情况 + process_count = multiprocessing.cpu_count() + print('process_count: %d' % process_count) + split_lists = get_chunks(task_lists, process_count) + pool = multiprocessing.Pool(process_count) + queue = Manager().Queue() + for lists in 
split_lists: + pool.apply_async(run_get_author, args=(lists, queue,)) + pool.close() + pool.join() + result = [] + # 从子进程读取结果并返回 + while not queue.empty(): + result.append(queue.get()) + return result + +now = lambda : time.time() + +if __name__ == '__main__': + start = now() + spider = BaiJiaHao() + spider.run() + print('done','TIME: ', now() - start) \ No newline at end of file diff --git a/redis配置文件.txt b/redis配置文件.txt new file mode 100644 index 0000000..5997c87 --- /dev/null +++ b/redis配置文件.txt @@ -0,0 +1,1378 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Notice option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all the network interfaces available on the server. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +# bind 192.168.1.100 10.0.0.1 +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only into +# the IPv4 loopback interface address (this means Redis will be able to +# accept connections only from clients running into the same computer it +# is running). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#bind 0.0.0.0 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. 
+# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. +# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need an high backlog in order +# to avoid slow clients connections issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Take the connection alive from the point of view of network +# equipment in the middle. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize yes + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous liveness pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. 
+# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show a +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# For default that's set to 'yes' as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. 
+# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) if replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all the kind of commands +# but to INFO, replicaOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, +# SUBSCRIBE, UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, +# COMMAND, POST, HOST: and LATENCY. +# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. 
+# +# ------------------------------------------------------- +# WARNING: DISKLESS REPLICATION IS EXPERIMENTAL CURRENTLY +# ------------------------------------------------------- +# +# New replicas and reconnecting replicas that are not able to continue the replication +# process just receiving differences, need to do what is called a "full +# synchronization". An RDB file is transmitted from the master to the replicas. +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child producing +# the RDB file finishes its work. With diskless replication instead once +# the transfer starts, new replicas arriving will be queued and a new transfer +# will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple replicas +# will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the server +# waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# Replicas send PINGs to server in a predefined interval. It's possible to change +# this interval with the repl_ping_replica_period option. The default value is 10 +# seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. 
+repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a replica +# wants to reconnect again, often a full resync is not needed, but a partial +# resync is enough, just passing the portion of data the replica missed while +# disconnected. +# +# The bigger the replication backlog, the longer the time the replica can be +# disconnected and later be able to perform a partial resynchronization. +# +# The backlog is only allocated once there is at least a replica connected. +# +# repl-backlog-size 1mb + +# After a master has no longer connected replicas for some time, the backlog +# will be freed. The following option configures the amount of seconds that +# need to elapse, starting from the time the last replica disconnected, for +# the backlog buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with the replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO output. +# It is used by Redis Sentinel in order to select a replica to promote into a +# master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel will +# pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP and address normally reported by a replica is obtained +# in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. 
+# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may be actually reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +################################## SECURITY ################################### + +# Require clients to issue AUTH before processing any other +# commands. This might be useful in environments in which you do not trust +# others with access to the host running redis-server. +# +# This should stay commented out for backward compatibility and because most +# people do not need auth (e.g. they run their own servers). +# +# Warning: since Redis is pretty fast an outside user can try up to +# 150k passwords per second against a good box. This means that you should +# use a very strong password otherwise it will be very easy to break. +# +# requirepass foobared + +# Command renaming. +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy). 
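+#
+# For example, an instance used purely as a cache might combine settings
+# along these lines (illustrative values only, neither directive is set
+# in this file):
+#
+# maxmemory 2gb
+# maxmemory-policy allkeys-lru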
+# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select among five behaviors: +# +# volatile-lru -> Evict using approximated LRU among the keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU among the keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key among the ones with an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. For default Redis will check five keys and pick the one that was +# used less recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica to have +# a different memory setting, and you are sure all the writes performed to the +# replica are idempotent, then you may change this default (but be sure to understand +# what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory and so +# forth). 
So make sure you monitor your replicas and make sure they have enough +# memory to never hit a real out-of-memory condition before the master hits +# the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string. +# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives: + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. 
For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. 
Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, and continues loading the AOF +# tail. +aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet called write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### +# +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# WARNING EXPERIMENTAL: Redis Cluster is considered to be stable code, however +# in order to mark it as "mature" we need to wait for a non trivial percentage +# of users to deploy it in production. +# ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ +# +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. 
In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. 
It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least an hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instruct the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usually. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). 
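+#
+# As an illustration, once slow entries are being recorded they can be
+# inspected and cleared at runtime with the standard SLOWLOG commands,
+# for example from redis-cli:
+#
+#   SLOWLOG GET 10
+#   SLOWLOG RESET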
+# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. +slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# A Alias for g$lshzxe, so that the "AKE" string means all the events. +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. 
Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. 
Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entires limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. +activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# replica -> replica clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. 
However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited ot 512 mb. However you can change this limit +# here. +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporary raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used as +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. 
This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A Special value of 0 means to +# decay the counter every time it happens to be scanned. +# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# WARNING THIS FEATURE IS EXPERIMENTAL. However it was stress tested +# even in production and manually tested by multiple engineers for some +# time. +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in an "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. 
If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Enabled active defragmentation +# activedefrag yes + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage +# active-defrag-cycle-min 5 + +# Maximal effort for defrag in CPU percentage +# active-defrag-cycle-max 75 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + diff --git a/usd_object.txt b/usd_object.txt new file mode 100644 index 0000000..6585adc --- /dev/null +++ b/usd_object.txt @@ -0,0 +1,335 @@ +#usda 1.0 +( + defaultPrim = "pCube1" + framesPerSecond = 24 + metersPerUnit = 0.01 + timeCodesPerSecond = 24 + upAxis = "Y" +) + +def Mesh "pPlane1" ( + prepend apiSchemas = ["ArnoldPolymeshAPI", "MaterialBindingAPI"] + kind = "component" +) +{ + uniform bool doubleSided = 1 + float3[] extent = [(-0.5, 0, -0.5), (0.5, 0, 0.5)] + int[] faceVertexCounts = [4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4] + int[] faceVertexIndices = [0, 1, 12, 11, 1, 2, 13, 12, 2, 3, 14, 13, 3, 4, 15, 14, 4, 5, 16, 15, 5, 6, 17, 16, 6, 7, 18, 17, 7, 8, 19, 18, 8, 9, 20, 19, 9, 10, 21, 20, 11, 12, 23, 22, 12, 13, 24, 23, 13, 14, 25, 24, 14, 15, 26, 25, 15, 16, 27, 26, 16, 17, 28, 27, 17, 18, 29, 28, 18, 19, 30, 29, 19, 20, 31, 30, 20, 21, 32, 31, 22, 23, 34, 33, 23, 24, 35, 34, 24, 25, 36, 35, 25, 26, 37, 36, 26, 27, 38, 37, 27, 28, 39, 38, 28, 29, 40, 39, 29, 30, 41, 40, 30, 31, 42, 41, 31, 32, 43, 42, 33, 34, 45, 44, 34, 35, 46, 45, 35, 36, 47, 46, 36, 37, 48, 47, 37, 38, 49, 48, 38, 39, 50, 49, 39, 40, 51, 50, 40, 41, 52, 51, 41, 42, 53, 52, 42, 43, 54, 53, 44, 45, 56, 55, 45, 46, 57, 56, 46, 47, 58, 57, 47, 48, 59, 58, 48, 49, 60, 59, 49, 50, 61, 60, 50, 51, 62, 61, 51, 52, 63, 62, 52, 53, 64, 63, 53, 54, 65, 64, 55, 56, 67, 66, 56, 57, 68, 67, 57, 58, 69, 68, 58, 59, 70, 69, 59, 60, 71, 70, 60, 61, 72, 71, 61, 62, 73, 72, 62, 63, 74, 73, 63, 64, 75, 74, 64, 65, 76, 75, 66, 67, 78, 77, 67, 68, 79, 78, 68, 69, 80, 79, 69, 70, 81, 80, 70, 71, 82, 81, 71, 72, 83, 82, 72, 73, 84, 83, 73, 74, 85, 84, 74, 75, 86, 85, 75, 76, 87, 86, 77, 78, 89, 88, 78, 79, 90, 89, 79, 80, 91, 90, 80, 81, 92, 91, 81, 82, 93, 92, 82, 83, 94, 93, 83, 84, 95, 94, 84, 85, 96, 95, 85, 86, 97, 96, 86, 87, 98, 97, 88, 89, 100, 99, 89, 90, 101, 100, 90, 91, 102, 101, 91, 92, 103, 102, 92, 93, 104, 103, 93, 94, 105, 104, 94, 95, 106, 105, 95, 96, 107, 106, 96, 97, 108, 107, 97, 98, 109, 108, 99, 100, 111, 110, 100, 101, 112, 111, 101, 102, 113, 112, 102, 103, 114, 113, 103, 104, 115, 114, 104, 105, 116, 115, 105, 106, 117, 116, 106, 107, 118, 117, 107, 108, 119, 118, 108, 109, 120, 119] + rel material:binding = + uniform token orientation = "rightHanded" + point3f[] points = [(-0.5, 0, 0.5), (-0.4, 0, 0.5), (-0.3, 0, 0.5), (-0.19999999, 0, 0.5), (-0.099999994, 0, 0.5), (0, 0, 0.5), (0.100000024, 0, 0.5), (0.19999999, 0, 0.5), (0.3, 0, 0.5), (0.40000004, 0, 0.5), (0.5, 0, 0.5), (-0.5, 0, 0.4), (-0.4, 0, 
0.4), (-0.3, 0, 0.4), (-0.19999999, 0, 0.4), (-0.099999994, 0, 0.4), (0, 0, 0.4), (0.100000024, 0, 0.4), (0.19999999, 0, 0.4), (0.3, 0, 0.4), (0.40000004, 0, 0.4), (0.5, 0, 0.4), (-0.5, 0, 0.3), (-0.4, 0, 0.3), (-0.3, 0, 0.3), (-0.19999999, 0, 0.3), (-0.099999994, 0, 0.3), (0, 0, 0.3), (0.100000024, 0, 0.3), (0.19999999, 0, 0.3), (0.3, 0, 0.3), (0.40000004, 0, 0.3), (0.5, 0, 0.3), (-0.5, 0, 0.19999999), (-0.4, 0, 0.19999999), (-0.3, 0, 0.19999999), (-0.19999999, 0, 0.19999999), (-0.099999994, 0, 0.19999999), (0, 0, 0.19999999), (0.100000024, 0, 0.19999999), (0.19999999, 0, 0.19999999), (0.3, 0, 0.19999999), (0.40000004, 0, 0.19999999), (0.5, 0, 0.19999999), (-0.5, 0, 0.099999994), (-0.4, 0, 0.099999994), (-0.3, 0, 0.099999994), (-0.19999999, 0, 0.099999994), (-0.099999994, 0, 0.099999994), (0, 0, 0.099999994), (0.100000024, 0, 0.099999994), (0.19999999, 0, 0.099999994), (0.3, 0, 0.099999994), (0.40000004, 0, 0.099999994), (0.5, 0, 0.099999994), (-0.5, 0, 0), (-0.4, 0, 0), (-0.3, 0, 0), (-0.19999999, 0, 0), (-0.099999994, 0, 0), (0, 0, 0), (0.100000024, 0, 0), (0.19999999, 0, 0), (0.3, 0, 0), (0.40000004, 0, 0), (0.5, 0, 0), (-0.5, 0, -0.100000024), (-0.4, 0, -0.100000024), (-0.3, 0, -0.100000024), (-0.19999999, 0, -0.100000024), (-0.099999994, 0, -0.100000024), (0, 0, -0.100000024), (0.100000024, 0, -0.100000024), (0.19999999, 0, -0.100000024), (0.3, 0, -0.100000024), (0.40000004, 0, -0.100000024), (0.5, 0, -0.100000024), (-0.5, 0, -0.19999999), (-0.4, 0, -0.19999999), (-0.3, 0, -0.19999999), (-0.19999999, 0, -0.19999999), (-0.099999994, 0, -0.19999999), (0, 0, -0.19999999), (0.100000024, 0, -0.19999999), (0.19999999, 0, -0.19999999), (0.3, 0, -0.19999999), (0.40000004, 0, -0.19999999), (0.5, 0, -0.19999999), (-0.5, 0, -0.3), (-0.4, 0, -0.3), (-0.3, 0, -0.3), (-0.19999999, 0, -0.3), (-0.099999994, 0, -0.3), (0, 0, -0.3), (0.100000024, 0, -0.3), (0.19999999, 0, -0.3), (0.3, 0, -0.3), (0.40000004, 0, -0.3), (0.5, 0, -0.3), (-0.5, 0, -0.40000004), (-0.4, 0, -0.40000004), (-0.3, 0, -0.40000004), (-0.19999999, 0, -0.40000004), (-0.099999994, 0, -0.40000004), (0, 0, -0.40000004), (0.100000024, 0, -0.40000004), (0.19999999, 0, -0.40000004), (0.3, 0, -0.40000004), (0.40000004, 0, -0.40000004), (0.5, 0, -0.40000004), (-0.5, 0, -0.5), (-0.4, 0, -0.5), (-0.3, 0, -0.5), (-0.19999999, 0, -0.5), (-0.099999994, 0, -0.5), (0, 0, -0.5), (0.100000024, 0, -0.5), (0.19999999, 0, -0.5), (0.3, 0, -0.5), (0.40000004, 0, -0.5), (0.5, 0, -0.5)] + uint primvars:arnold:id = 404087512 + uchar primvars:arnold:subdiv_iterations = 1 + string primvars:dcc_name = "pPlaneShape1" ( + elementSize = 1 + interpolation = "constant" + ) + vector3f[] primvars:normals = [(0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 
0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0)] ( + elementSize = 121 + interpolation = "faceVarying" + ) + int[] primvars:normals:indices = [0, 1, 2, 3, 1, 4, 5, 2, 4, 6, 7, 5, 6, 8, 9, 7, 8, 10, 11, 9, 10, 12, 13, 11, 12, 14, 15, 13, 14, 16, 17, 15, 16, 18, 19, 17, 18, 20, 21, 19, 3, 2, 22, 23, 2, 5, 24, 22, 5, 7, 25, 24, 7, 9, 26, 25, 9, 11, 27, 26, 11, 13, 28, 27, 13, 15, 29, 28, 15, 17, 30, 29, 17, 19, 31, 30, 19, 21, 32, 31, 23, 22, 33, 34, 22, 24, 35, 33, 24, 25, 36, 35, 25, 26, 37, 36, 26, 27, 38, 37, 27, 28, 39, 38, 28, 29, 40, 39, 29, 30, 41, 40, 30, 31, 42, 41, 31, 32, 43, 42, 34, 33, 44, 45, 33, 35, 46, 44, 35, 36, 47, 46, 36, 37, 48, 47, 37, 38, 49, 48, 38, 39, 50, 49, 39, 40, 51, 50, 40, 41, 52, 51, 41, 42, 53, 52, 42, 43, 54, 53, 45, 44, 55, 56, 44, 46, 57, 55, 46, 47, 58, 57, 47, 48, 59, 58, 48, 49, 60, 59, 49, 50, 61, 60, 50, 51, 62, 61, 51, 52, 63, 62, 52, 53, 64, 63, 53, 54, 65, 64, 56, 55, 66, 67, 55, 57, 68, 66, 57, 58, 69, 68, 58, 59, 70, 69, 59, 60, 71, 70, 60, 61, 72, 71, 61, 62, 73, 72, 62, 63, 74, 73, 63, 64, 75, 74, 64, 65, 76, 75, 67, 66, 77, 78, 66, 68, 79, 77, 68, 69, 80, 79, 69, 70, 81, 80, 70, 71, 82, 81, 71, 72, 83, 82, 72, 73, 84, 83, 73, 74, 85, 84, 74, 75, 86, 85, 75, 76, 87, 86, 78, 77, 88, 89, 77, 79, 90, 88, 79, 80, 91, 90, 80, 81, 92, 91, 81, 82, 93, 92, 82, 83, 94, 93, 83, 84, 95, 94, 84, 85, 96, 95, 85, 86, 97, 96, 86, 87, 98, 97, 89, 88, 99, 100, 88, 90, 101, 99, 90, 91, 102, 101, 91, 92, 103, 102, 92, 93, 104, 103, 93, 94, 105, 104, 94, 95, 106, 105, 95, 96, 107, 106, 96, 97, 108, 107, 97, 98, 109, 108, 100, 99, 110, 111, 99, 101, 112, 110, 101, 102, 113, 112, 102, 103, 114, 113, 103, 104, 115, 114, 104, 105, 116, 115, 105, 106, 117, 116, 106, 107, 118, 117, 107, 108, 119, 118, 108, 109, 120, 119] + texCoord2f[] primvars:st = [(0, 0), (0.1, 0), (0.2, 0), (0.3, 0), (0.4, 0), (0.5, 0), (0.6, 0), (0.7, 0), (0.8, 0), (0.90000004, 0), (1, 0), (0, 0.1), (0.1, 0.1), (0.2, 0.1), (0.3, 0.1), (0.4, 0.1), (0.5, 0.1), (0.6, 0.1), (0.7, 0.1), (0.8, 0.1), (0.90000004, 0.1), (1, 0.1), (0, 0.2), (0.1, 0.2), (0.2, 0.2), (0.3, 0.2), (0.4, 0.2), (0.5, 0.2), (0.6, 0.2), (0.7, 0.2), (0.8, 0.2), (0.90000004, 0.2), (1, 0.2), (0, 0.3), (0.1, 0.3), (0.2, 0.3), (0.3, 0.3), (0.4, 0.3), (0.5, 0.3), (0.6, 0.3), (0.7, 0.3), (0.8, 0.3), (0.90000004, 0.3), (1, 0.3), (0, 0.4), (0.1, 0.4), (0.2, 0.4), (0.3, 0.4), (0.4, 0.4), (0.5, 0.4), (0.6, 0.4), (0.7, 0.4), (0.8, 0.4), (0.90000004, 0.4), (1, 0.4), (0, 0.5), (0.1, 0.5), (0.2, 0.5), (0.3, 0.5), (0.4, 0.5), (0.5, 0.5), (0.6, 0.5), (0.7, 0.5), (0.8, 0.5), (0.90000004, 0.5), (1, 0.5), (0, 0.6), (0.1, 0.6), (0.2, 0.6), (0.3, 0.6), (0.4, 0.6), (0.5, 0.6), (0.6, 0.6), (0.7, 0.6), (0.8, 0.6), (0.90000004, 0.6), (1, 0.6), (0, 0.7), (0.1, 0.7), (0.2, 0.7), (0.3, 0.7), (0.4, 0.7), (0.5, 0.7), (0.6, 0.7), (0.7, 0.7), (0.8, 0.7), (0.90000004, 0.7), (1, 0.7), (0, 0.8), (0.1, 0.8), (0.2, 0.8), (0.3, 0.8), (0.4, 0.8), (0.5, 0.8), (0.6, 0.8), (0.7, 0.8), (0.8, 0.8), (0.90000004, 0.8), (1, 0.8), (0, 0.90000004), (0.1, 0.90000004), (0.2, 0.90000004), (0.3, 0.90000004), (0.4, 0.90000004), (0.5, 0.90000004), (0.6, 0.90000004), (0.7, 0.90000004), 
(0.8, 0.90000004), (0.90000004, 0.90000004), (1, 0.90000004), (0, 1), (0.1, 1), (0.2, 1), (0.3, 1), (0.4, 1), (0.5, 1), (0.6, 1), (0.7, 1), (0.8, 1), (0.90000004, 1), (1, 1)] ( + customData = { + dictionary Maya = { + token name = "map1" + } + } + interpolation = "faceVarying" + ) + int[] primvars:st:indices = [0, 1, 12, 11, 1, 2, 13, 12, 2, 3, 14, 13, 3, 4, 15, 14, 4, 5, 16, 15, 5, 6, 17, 16, 6, 7, 18, 17, 7, 8, 19, 18, 8, 9, 20, 19, 9, 10, 21, 20, 11, 12, 23, 22, 12, 13, 24, 23, 13, 14, 25, 24, 14, 15, 26, 25, 15, 16, 27, 26, 16, 17, 28, 27, 17, 18, 29, 28, 18, 19, 30, 29, 19, 20, 31, 30, 20, 21, 32, 31, 22, 23, 34, 33, 23, 24, 35, 34, 24, 25, 36, 35, 25, 26, 37, 36, 26, 27, 38, 37, 27, 28, 39, 38, 28, 29, 40, 39, 29, 30, 41, 40, 30, 31, 42, 41, 31, 32, 43, 42, 33, 34, 45, 44, 34, 35, 46, 45, 35, 36, 47, 46, 36, 37, 48, 47, 37, 38, 49, 48, 38, 39, 50, 49, 39, 40, 51, 50, 40, 41, 52, 51, 41, 42, 53, 52, 42, 43, 54, 53, 44, 45, 56, 55, 45, 46, 57, 56, 46, 47, 58, 57, 47, 48, 59, 58, 48, 49, 60, 59, 49, 50, 61, 60, 50, 51, 62, 61, 51, 52, 63, 62, 52, 53, 64, 63, 53, 54, 65, 64, 55, 56, 67, 66, 56, 57, 68, 67, 57, 58, 69, 68, 58, 59, 70, 69, 59, 60, 71, 70, 60, 61, 72, 71, 61, 62, 73, 72, 62, 63, 74, 73, 63, 64, 75, 74, 64, 65, 76, 75, 66, 67, 78, 77, 67, 68, 79, 78, 68, 69, 80, 79, 69, 70, 81, 80, 70, 71, 82, 81, 71, 72, 83, 82, 72, 73, 84, 83, 73, 74, 85, 84, 74, 75, 86, 85, 75, 76, 87, 86, 77, 78, 89, 88, 78, 79, 90, 89, 79, 80, 91, 90, 80, 81, 92, 91, 81, 82, 93, 92, 82, 83, 94, 93, 83, 84, 95, 94, 84, 85, 96, 95, 85, 86, 97, 96, 86, 87, 98, 97, 88, 89, 100, 99, 89, 90, 101, 100, 90, 91, 102, 101, 91, 92, 103, 102, 92, 93, 104, 103, 93, 94, 105, 104, 94, 95, 106, 105, 95, 96, 107, 106, 96, 97, 108, 107, 97, 98, 109, 108, 99, 100, 111, 110, 100, 101, 112, 111, 101, 102, 113, 112, 102, 103, 114, 113, 103, 104, 115, 114, 104, 105, 116, 115, 105, 106, 117, 116, 106, 107, 118, 117, 107, 108, 119, 118, 108, 109, 120, 119] + uniform token subdivisionScheme = "none" +} + +def Mesh "pCube1" ( + prepend apiSchemas = ["ArnoldPolymeshAPI", "MaterialBindingAPI"] + kind = "component" +) +{ + uniform bool doubleSided = 1 + float3[] extent = [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)] + int[] faceVertexCounts = [4, 4, 4, 4, 4, 4] + int[] faceVertexIndices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 1, 0, 1, 7, 5, 3, 6, 0, 2, 4] + rel material:binding = + uniform token orientation = "rightHanded" + point3f[] points = [(-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5)] + uint primvars:arnold:id = 137147339 + uchar primvars:arnold:subdiv_iterations = 1 + string primvars:dcc_name = "pCubeShape1" ( + elementSize = 1 + interpolation = "constant" + ) + vector3f[] primvars:normals = [(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, -1, 0), (0, -1, 0), (0, -1, 0), (0, -1, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0)] ( + elementSize = 24 + interpolation = "faceVarying" + ) + int[] primvars:normals:indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + texCoord2f[] primvars:st = [(0.375, 0), (0.625, 0), (0.375, 0.25), (0.625, 0.25), (0.375, 0.5), (0.625, 0.5), (0.375, 0.75), (0.625, 0.75), (0.375, 1), (0.625, 1), (0.875, 0), (0.875, 0.25), (0.125, 0), (0.125, 0.25)] ( + customData = { + dictionary Maya = { + token name = "map1" + } + } + 
interpolation = "faceVarying" + ) + int[] primvars:st:indices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8, 1, 10, 11, 3, 12, 0, 2, 13] + uniform token subdivisionScheme = "none" + float3 xformOp:scale = (0.13136616, 0.13136616, 0.13136616) + double3 xformOp:translate = (0, 0.06700861885021758, 0) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:scale"] + + def GeomSubset "back" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [2] + } + + def GeomSubset "bottom" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [3] + } + + def GeomSubset "front" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [0] + } + + def GeomSubset "left" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [5] + } + + def GeomSubset "right" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [4] + } + + def GeomSubset "top" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [1] + } +} + +def Mesh "pCube2" ( + prepend apiSchemas = ["ArnoldPolymeshAPI", "MaterialBindingAPI"] + kind = "component" +) +{ + uniform bool doubleSided = 1 + float3[] extent = [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)] + int[] faceVertexCounts = [4, 4, 4, 4, 4, 4] + int[] faceVertexIndices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 1, 0, 1, 7, 5, 3, 6, 0, 2, 4] + rel material:binding = + uniform token orientation = "rightHanded" + point3f[] points = [(-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5)] + uint primvars:arnold:id = 1676049007 + uchar primvars:arnold:subdiv_iterations = 1 + string primvars:dcc_name = "pCubeShape2" ( + elementSize = 1 + interpolation = "constant" + ) + vector3f[] primvars:normals = [(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, -1, 0), (0, -1, 0), (0, -1, 0), (0, -1, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0)] ( + elementSize = 24 + interpolation = "faceVarying" + ) + int[] primvars:normals:indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + texCoord2f[] primvars:st = [(0.375, 0), (0.625, 0), (0.375, 0.25), (0.625, 0.25), (0.375, 0.5), (0.625, 0.5), (0.375, 0.75), (0.625, 0.75), (0.375, 1), (0.625, 1), (0.875, 0), (0.875, 0.25), (0.125, 0), (0.125, 0.25)] ( + customData = { + dictionary Maya = { + token name = "map1" + } + } + interpolation = "faceVarying" + ) + int[] primvars:st:indices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8, 1, 10, 11, 3, 12, 0, 2, 13] + uniform token subdivisionScheme = "none" + float3 xformOp:scale = (0.13136616, 0.13136616, 0.13136616) + double3 xformOp:translate = (0, 0.06700861885021758, 0.18436951007270105) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:scale"] + + def GeomSubset "back" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [2] + } + + def GeomSubset "bottom" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [3] + } + + def GeomSubset "front" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] 
indices = [0] + } + + def GeomSubset "left" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [5] + } + + def GeomSubset "right" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [4] + } + + def GeomSubset "top" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [1] + } +} + +def Mesh "pCube3" ( + prepend apiSchemas = ["ArnoldPolymeshAPI", "MaterialBindingAPI"] + kind = "component" +) +{ + uniform bool doubleSided = 1 + float3[] extent = [(-0.5, -0.5, -0.5), (0.5, 0.5, 0.5)] + int[] faceVertexCounts = [4, 4, 4, 4, 4, 4] + int[] faceVertexIndices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 1, 0, 1, 7, 5, 3, 6, 0, 2, 4] + rel material:binding = + uniform token orientation = "rightHanded" + point3f[] points = [(-0.5, -0.5, 0.5), (0.5, -0.5, 0.5), (-0.5, 0.5, 0.5), (0.5, 0.5, 0.5), (-0.5, 0.5, -0.5), (0.5, 0.5, -0.5), (-0.5, -0.5, -0.5), (0.5, -0.5, -0.5)] + uint primvars:arnold:id = 2277356065 + uchar primvars:arnold:subdiv_iterations = 1 + bool primvars:arnold:visibility:camera = 0 + string primvars:dcc_name = "pCubeShape3" ( + elementSize = 1 + interpolation = "constant" + ) + vector3f[] primvars:normals = [(0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 0, 1), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1), (0, -1, 0), (0, -1, 0), (0, -1, 0), (0, -1, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0), (-1, 0, 0)] ( + elementSize = 24 + interpolation = "faceVarying" + ) + int[] primvars:normals:indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23] + texCoord2f[] primvars:st = [(0.375, 0), (0.625, 0), (0.375, 0.25), (0.625, 0.25), (0.375, 0.5), (0.625, 0.5), (0.375, 0.75), (0.625, 0.75), (0.375, 1), (0.625, 1), (0.875, 0), (0.875, 0.25), (0.125, 0), (0.125, 0.25)] ( + customData = { + dictionary Maya = { + token name = "map1" + } + } + interpolation = "faceVarying" + ) + int[] primvars:st:indices = [0, 1, 3, 2, 2, 3, 5, 4, 4, 5, 7, 6, 6, 7, 9, 8, 1, 10, 11, 3, 12, 0, 2, 13] + uniform token subdivisionScheme = "none" + float3 xformOp:scale = (0.13136616, 0.13136616, 0.13136616) + double3 xformOp:translate = (0, 0.06700861885021758, -0.18152052818961276) + uniform token[] xformOpOrder = ["xformOp:translate", "xformOp:scale"] + + def GeomSubset "back" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [2] + } + + def GeomSubset "bottom" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [3] + } + + def GeomSubset "front" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [0] + } + + def GeomSubset "left" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [5] + } + + def GeomSubset "right" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [4] + } + + def GeomSubset "top" + { + uniform token elementType = "face" + uniform token familyName = "componentTag" + int[] indices = [1] + } +} + +def "mtl" +{ + def Material "aiStandardSurface4" + { + token outputs:arnold:surface.connect = + + def Shader "aiStandardSurface4" + { + uniform token info:id = "arnold:standard_surface" + token outputs:surface + } + } + + def Material "aiStandardSurface2" + { + token 
outputs:arnold:surface.connect = + + def Shader "aiStandardSurface2" + { + uniform token info:id = "arnold:standard_surface" + color3f inputs:base_color = (0, 1, 0) + token outputs:surface + } + } + + def Material "aiStandardSurface1" + { + token outputs:arnold:surface.connect = + + def Shader "aiStandardSurface1" + { + uniform token info:id = "arnold:standard_surface" + color3f inputs:base_color = (1, 0, 0) + token outputs:surface + } + } + + def Material "aiStandardSurface3" + { + token outputs:arnold:surface.connect = + + def Shader "aiStandardSurface3" + { + uniform token info:id = "arnold:standard_surface" + color3f inputs:base_color = (0, 0, 1) + token outputs:surface + } + } +} + diff --git a/usd_教程.txt b/usd_教程.txt new file mode 100644 index 0000000..dd2c9c2 --- /dev/null +++ b/usd_教程.txt @@ -0,0 +1 @@ +https://www.youtube.com/watch?v=5hSx6PfXfg8&list=PL8hZ6hQCGHMXCJT70XGxGhHrNjBgvF-eX&index=7 \ No newline at end of file diff --git a/usd设置关键帧.txt b/usd设置关键帧.txt new file mode 100644 index 0000000..1c2d453 --- /dev/null +++ b/usd设置关键帧.txt @@ -0,0 +1,1154 @@ +from pxr import Usd, UsdGeom, Gf +import random + +# 创建一个新的USD场景 +stage = Usd.Stage.CreateNew('HelloWorld.usda') + +# 定义一个变换和一个球体 +xformPrim = UsdGeom.Xform.Define(stage, '/hello') +spherePrim = UsdGeom.Sphere.Define(stage, '/hello/world') + +# 获取球体的变换属性 +xformAttr = UsdGeom.Xformable(spherePrim).AddTranslateOp() + +# 在随机位置移动球体并设置十个关键帧 +for frame in range(1, 11): + # 生成随机位置 + pos = Gf.Vec3f(random.uniform(-10, 10), random.uniform(-10, 10), random.uniform(-10, 10)) + + # 设置关键帧 + xformAttr.Set(pos, frame) + +# 保存场景 +stage.GetRootLayer().Save() + + + + + + + + + +import maya.cmds as cmds + +# 创建一个pxrUsdReferenceAssembly节点 +usdNode = cmds.createNode('mayaUsdProxyShape') + +# 加载USD文件 +cmds.setAttr(usdNode + '.filePath', '/home/jcen/PycharmProjects/pythonProject4/sphere_animation.usda', type='string') + +# 刷新节点以加载USD文件 +cmds.refresh(usdNode) + + + + + + + + + + +https://github.com/kiryha/Houdini/wiki/Pixar-USD-Python-API#save-mesh-object-to-usd-file + + + + + + + + + + + + + +from pxr import Usd, UsdGeom, Sdf + +# 创建一个新的USD文件 +stage = Usd.Stage.CreateNew('box.usda') + +# 创建一个Box +box = UsdGeom.Xform.Define(stage, '/Box') + +# 创建一个variantSet +variantSet = box.GetPrim().GetVariantSets().AddVariantSet('geo') + +# 添加variant +for variantName in ['base', 'client_model', 'collision', 'deform_high', 'deform_low', 'display_high', 'display_low', 'garment', 'maquette', 'muscles', 'pose', 'render_high', 'scan_base', 'scan_high', 'scan_low', 'scan_mid', 'sculpt_bind', 'sculpt_pose', 'sculpt_symm', 'shapes_high']: + variantSet.AddVariant(variantName) + variantSet.SetVariantSelection(variantName) + with variantSet.GetVariantEditContext(): + if variantName in ['base', 'deform_high', 'display_high', 'pose', 'render_high']: + # 在这些variant中,我们添加一个extentsHint属性 + box.GetPrim().CreateAttribute('extentsHint', Sdf.ValueTypeNames.Float3Array).Set([(-16.629925, -0.035511017, -11.017016), (16.459915, 3.3556676, 11.016925)]) + # if variantName == 'base': + # # 在base变体中,我们添加一个payload + # with stage.GetEditTarget().GetLayer().PermissionToEdit(): + # box.GetPrim().GetPayloads().AddPayload(r"C:\Users\ARTIST\Desktop\tstProj\s.usda") + +# 保存USD文件 +stage.GetRootLayer().Save() + + + + + + + + + + + + + + + + + + + + +# -*- coding: utf-8 Jcen -*- +import os, shutil +import re +# +# file_size = 0 +# +# for root, dirs, files in os.walk('W:/LFC'): +# try: +# if dirs[0] == 'render' and re.search('write', root.lower()): +# # print(root, dirs, files) +# # print() +# if 
re.search(r'v\d+', os.listdir(os.path.join(root.replace('\\', '/'), dirs[0]))[0]): +# # print(os.listdir(os.path.join(root.replace('\\', '/'), dirs[0]))) +# # print(root, dirs, files, os.listdir(os.path.join(root.replace('\\', '/'), dirs[0]))[0]) +# +# if re.search(r'.*[a-zA-Z]\d{2}_\d{3}.*', os.listdir(os.path.join(root, dirs[0], os.listdir(os.path.join(root.replace('\\', '/'), dirs[0]))[0]))[0]): +# path = os.path.join(root, dirs[0], os.listdir(os.path.join(root.replace('\\', '/'), dirs[0]))[0]) +# file = os.listdir(path) +# for f in file: +# match1 = re.search(r'\.\d{4}\.exr|\.\d{3}\.exr', f) +# if match1: +# frame = match1.group() +# if len(frame) == 8: +# frame = frame[:1] + '0' + frame[1:] +# file_path = os.path.join(path, f).replace('\\', '/') +# new_file_path = ['T:/proj/LFC/FIND_EXR', root.replace('\\', '/').split('/')[2], root.replace('\\', '/').split('/')[3], 'render_' + root.replace('\\', '/').split('/')[3] + frame] +# # print(root.replace('\\', '/').split('/')[2], 'render_' + root.replace('\\', '/').split('/')[3], match1.group()) +# file_size += os.path.getsize(file_path) +# print(file_path, '/'.join(new_file_path)) +# print('/'.join('/'.join(new_file_path).split('/')[:-1])) +# # if not os.path.exists('/'.join('/'.join(new_file_path).split('/')[:-1])): +# # os.makedirs('/'.join('/'.join(new_file_path).split('/')[:-1])) +# shutil.copy2(file_path, '/'.join(new_file_path)) +# except: +# pass +# +# print(file_size / (1024**3)) + + +# for i in os.listdir(r'T:\proj\LFC\FIND_EXR\q30'): +# # os.rename() +# os.rename(os.path.join('T:\proj\LFC\FIND_EXR\q30', i), os.path.join('T:\proj\LFC\FIND_EXR\q30', 'render_'+i)) + +# from pxr import Usd, UsdGeom +# stage = Usd.Stage.CreateNew('HelloWorld.usda') +# xformPrim = UsdGeom.Xform.Define(stage, '/hello') +# spherePrim = UsdGeom.Sphere.Define(stage, '/hello/world') +# stage.GetRootLayer().Save() + + +# from pxr import Usd, UsdGeom +# stage = Usd.Stage.Open('Hello_World1.usda') +# colorAttr = UsdGeom.Gprim.Get(stage, '/Root/Mesh').GetDisplayColorAttr() +# colorAttr.Clear() +# print(stage.GetRootLayer().ExportToString()) +# +# rootPrim = stage.GetPrimAtPath('/Root') +# vset = rootPrim.GetVariantSets().AddVariantSet('shadingVariant') +# print(stage.GetRootLayer().ExportToString()) +# +# +# vset.AddVariant('red') +# vset.AddVariant('blue') +# vset.AddVariant('green') +# print(stage.GetRootLayer().ExportToString()) +# +# vset.SetVariantSelection('red') +# with vset.GetVariantEditContext(): +# colorAttr.Set([(1,0,0)]) +# +# print(stage.GetRootLayer().ExportToString()) +# +# vset.SetVariantSelection('blue') +# with vset.GetVariantEditContext(): +# colorAttr.Set([(0,0,1)]) +# +# vset.SetVariantSelection('green') +# with vset.GetVariantEditContext(): +# colorAttr.Set([(0,1,0)]) +# +# print(stage.GetRootLayer().ExportToString()) +# stage.GetRootLayer().Export('HelloWorldWithVariants1.usda') + + + +# from pxr import Usd, UsdGeom, Sdf +# +# def crate_geometry(): +# """ +# Procedurally create geometry and save it to the USDA file +# """ +# +# # Create USD +# stage = Usd.Stage.CreateNew('hello_world1.usda') +# +# # Build mesh object +# root_xform = UsdGeom.Xform.Define(stage, '/Root') +# mesh = UsdGeom.Mesh.Define(stage, '/Root/Mesh') +# +# # Build mesh geometry. 
Here polygon creation magic should happen +# geometry_data = {'points': [(-1, 0, 1), (1, 0, 1), (1, 0, -1), (-1, 0, -1)], +# 'face_vertex_counts': [4], +# 'face_vertex_indices': [0, 1, 2, 3]} +# +# # Set mesh attributes +# mesh.GetPointsAttr().Set(geometry_data['points']) +# mesh.GetFaceVertexCountsAttr().Set(geometry_data['face_vertex_counts']) +# mesh.GetFaceVertexIndicesAttr().Set(geometry_data['face_vertex_indices']) +# +# # Save USD +# stage.GetRootLayer().Save() +# +# +# crate_geometry() +# +# from pxr import Usd, UsdGeom, Sdf, Gf, UsdShade +# import random +# stage = Usd.Stage.Open('HelloWorldWithVariants1.usda') +# material = UsdShade.Material.Define(stage, '/Root/Mesh') +# +# pbrShader = UsdShade.Shader.Define(stage, '/Root/Mesh/PBRShader') +# pbrShader.CreateIdAttr("UsdPreviewSurface") +# pbrShader.CreateInput("roughness", Sdf.ValueTypeNames.Float).Set(0.4) +# pbrShader.CreateInput("metallic", Sdf.ValueTypeNames.Float).Set(0.0) +# +# # material.CreateSurfaceOutput().ConnectToSource(pbrShader.ConnectableAPI(), "surface") +# # stReader = UsdShade.Shader.Define(stage, '/Root/Mesh/stReader') +# # stReader.CreateIdAttr('UsdPrimvarReader_float2') +# # # 定义一个变换和一个球体 +# # spherePrim = stage.GetPrimAtPath('/Root/Mesh') +# # # 获取球体的变换属性 +# # xformAttr = UsdGeom.Xformable(spherePrim).AddTranslateOp() +# # +# # # 在随机位置移动球体并设置十个关键帧 +# # for frame in range(1, 11): +# # # 生成随机位置 +# # pos = Gf.Vec3f(random.uniform(-10, 10), random.uniform(-10, 10), random.uniform(-10, 10)) +# # +# # # 设置关键帧 +# # xformAttr.Set(pos, frame) +# # +# # 保存场景 +# stage.GetRootLayer().Save() + + +from pxr import Usd, UsdGeom, Sdf, UsdShade + +# 创建一个新的USD文件 +stage = Usd.Stage.CreateNew('box.usda') + +# 创建一个Box +box = UsdGeom.Xform.Define(stage, '/Box') +# stage = Usd.Stage.Open('box1.usda') +# +# print(dir(UsdGeom.Xform)) +# try: +# print(UsdGeom.Xform.Get(stage, '')) +# except: +# pass +# # print(box) +# print(dir(stage)) + +# 创建一个variantSet +variantSet = box.GetPrim().GetVariantSets().AddVariantSet('geo') + +# 添加variant +for variantName in ['base']: + # ', client_model', 'collision', 'deform_high', 'deform_low', 'display_high', 'display_low', 'garment', 'maquette', 'muscles', 'pose', 'render_high', 'scan_base', 'scan_high', 'scan_low', 'scan_mid', 'sculpt_bind', 'sculpt_pose', 'sculpt_symm', 'shapes_high']: + variantSet.AddVariant(variantName) + variantSet.SetVariantSelection(variantName) + if variantSet.GetVariantSelection() == 'base': + with variantSet.GetVariantEditContext(): + new_xform = UsdGeom.Mesh.Define(stage, '/Box/NewXform') + new_xform.GetPrim().GetPayloads().AddPayload(r"C:\Users\ARTIST\Desktop\tstProj\s.usda") + with variantSet.GetVariantEditContext(): + if variantName in ['base', 'deform_high', 'display_high', 'pose', 'render_high']: + # 在这些variant中,我们添加一个extentsHint属性 + box.GetPrim().CreateAttribute('extentsHint', Sdf.ValueTypeNames.Float3Array).Set([(-16.629925, -0.035511017, -11.017016), (16.459915, 3.3556676, 11.016925)]) + # if variantName == 'base': + + + # box.GetPrim().CreateAttribute('extentsHint', Sdf.ValueTypeNames.Float3Array).Set( + # [(-16.629925, -0.035511017, -11.017016), (16.459915, 3.3556676, 11.016925)]) + # # 在base变体中,我们添加一个payload + # with stage.GetEditTarget().GetLayer().PermissionToEdit(): + # box.GetPrim().GetPayloads().AddPayload(r"C:\Users\ARTIST\Desktop\tstProj\s.usda") + +# 保存USD文件 +stage.GetRootLayer().Save() + + + + + + + + + + + +""" +Export geometry from Maya scene to USD file +""" + +import random +from pxr import Usd, UsdGeom +import pymel.core as pm + + +def 
get_geometry_data(mesh): + """ + Get points data for each face for USD file record + """ + + points = [] # World position coordinates (tuples) for each geometry point (point3f[] points) + face_vertex_counts = [] # Number of vertices in each geometry face (int[] faceVertexCounts) + face_vertex_indices = [] # List of geometry vertex indexes (int[] faceVertexIndices) + + # Get vertex data for each face + vertex_index = 0 + for face in mesh.faces: + vertex_indexes = [] + for vertex in face.getVertices(): + position = tuple(mesh.vtx[vertex].getPosition(space='world')) + points.append(position) + vertex_indexes.append(vertex_index) + vertex_index += 1 + + face_vertex_counts.append(len(vertex_indexes)) + face_vertex_indices.extend(vertex_indexes) + + return points, face_vertex_counts, face_vertex_indices + + +def process_geometry(stage, root_xform): + """ + Iterate over all scene meshes and record them to the USD stage + """ + + for mesh in pm.ls(type='mesh'): + + # Create a USD Mesh primitive for the mesh object + usd_mesh = UsdGeom.Mesh.Define(stage, root_xform.GetPath().AppendChild(mesh.getParent().name())) + + # Get geometry data + points, face_vertex_counts, face_vertex_indices = get_geometry_data(mesh) + + # Set the collected attributes for the USD Mesh + usd_mesh.GetPointsAttr().Set(points) + usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts) + usd_mesh.GetFaceVertexIndicesAttr().Set(face_vertex_indices) + + +def export_geometry(): + """ + Create USD file and record geometry data + """ + + # Create USD stage + usd_file_path = r"D:\Test\temp5.usda" + + # Create USD stage and root object + stage = Usd.Stage.CreateNew(usd_file_path) + spherePrim = UsdGeom.Xform.Define(stage, '/pSphere1') + rootPrim = stage.GetPrimAtPath('/pSphere1') + stage.SetDefaultPrim(rootPrim) + + + UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) + stage.SetMetadata('metersPerUnit', 0.01) + stage.GetRootLayer().documentation = "Generated from Composed Stage of pSphere1 layer" + + + root_xform = UsdGeom.Xform.Define(stage, '/') + + process_geometry(stage, spherePrim) + + # Save the USD stage to the file + stage.GetRootLayer().Save() + print(f'>> {usd_file_path}') + + +export_geometry() +# print(get_geometry_data(pm.ls(type='mesh')[0])) + + + +""" +Export geometry from Maya scene to USD file +""" + +import random +from pxr import Usd, UsdGeom +import pymel.core as pm + + +def get_geometry_data(mesh): + """ + Get points data for each face for USD file record + """ + + points = [] + face_vertex_counts = [] + face_vertex_indices = [] + uv_coords = [] + + vertex_index = 0 + for face in mesh.faces: + vertex_indexes = [] + for vertex in face.getVertices(): + position = tuple(mesh.vtx[vertex].getPosition(space='world')) + points.append(position) + vertex_indexes.append(vertex_index) + vertex_index += 1 + + face_vertex_counts.append(len(vertex_indexes)) + face_vertex_indices.extend(vertex_indexes) + + uvs = mesh.getUVs() + for i in range(len(uvs[0])): + uv = (uvs[0][i], uvs[1][i]) + uv_coords.append(uv) + + return points, face_vertex_counts, face_vertex_indices, uv_coords + + +def process_geometry(stage, root_xform): + """ + Iterate over all scene meshes and record them to the USD stage + """ + + for mesh in pm.ls(type='mesh'): + + # Create a USD Mesh primitive for the mesh object + usd_mesh = UsdGeom.Mesh.Define(stage, root_xform.GetPath().AppendChild(mesh.getParent().name())) + + # Get geometry data + points, face_vertex_counts, face_vertex_indices, uv_coords = get_geometry_data(mesh) + + # Set the collected attributes 
for the USD Mesh + usd_mesh.GetPointsAttr().Set(points) + usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts) + usd_mesh.GetFaceVertexIndicesAttr().Set(face_vertex_indices) + stAttr = usd_mesh.GetPrim().CreateAttribute("primvars:st", Sdf.ValueTypeNames.TexCoord2fArray, False) + stAttr.Set(uv_coords) + + +def export_geometry(): + """ + Create USD file and record geometry data + """ + + # Create USD stage + usd_file_path = r"D:\Test\temp11.usda" + + # Create USD stage and root object + stage = Usd.Stage.CreateNew(usd_file_path) + spherePrim = UsdGeom.Xform.Define(stage, '/pSphere1') + rootPrim = stage.GetPrimAtPath('/pSphere1') + stage.SetDefaultPrim(rootPrim) + + + UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) + stage.SetMetadata('metersPerUnit', 0.01) + stage.GetRootLayer().documentation = "Generated from Composed Stage of pSphere1 layer" + + + root_xform = UsdGeom.Xform.Define(stage, '/') + + process_geometry(stage, spherePrim) + + # Save the USD stage to the file + stage.GetRootLayer().Save() + print(f'>> {usd_file_path}') + + +export_geometry() +# print(get_geometry_data(pm.ls(type='mesh')[0])) + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" +Export geometry from Maya scene to USD file +""" + +import random +from pxr import Usd, UsdGeom, Sdf +import pymel.core as pm + + +def get_geometry_data(mesh): + """ + Get points data for each face for USD file record + """ + + points = [] + face_vertex_counts = [] + face_vertex_indices = [] + uv_coords = [] + + vertex_index = 0 + for face in mesh.faces: + vertex_indexes = [] + for vertex in face.getVertices(): + position = tuple(mesh.vtx[vertex].getPosition(space='world')) + points.append(position) + vertex_indexes.append(vertex_index) + vertex_index += 1 + + face_vertex_counts.append(len(vertex_indexes)) + face_vertex_indices.extend(vertex_indexes) + + uvs = mesh.getUVs() + for i in range(len(uvs[0])): + uv = (uvs[0][i], uvs[1][i]) + uv_coords.append(uv) + + return points, face_vertex_counts, face_vertex_indices, uv_coords + + +def process_geometry(stage, root_xform): + """ + Iterate over all scene meshes and record them to the USD stage + """ + + for mesh in pm.ls(type='mesh'): + + # Create a USD Mesh primitive for the mesh object + usd_mesh = UsdGeom.Mesh.Define(stage, root_xform.GetPath().AppendChild(mesh.getParent().name())) + + # Get geometry data + points, face_vertex_counts, face_vertex_indices, uv_coords = get_geometry_data(mesh) + + # Set the collected attributes for the USD Mesh + usd_mesh.GetPointsAttr().Set(points) + usd_mesh.GetFaceVertexCountsAttr().Set(face_vertex_counts) + usd_mesh.GetFaceVertexIndicesAttr().Set(face_vertex_indices) + stAttr = usd_mesh.GetPrim().CreateAttribute("primvars:st", Sdf.ValueTypeNames.TexCoord2fArray, False) + stAttr.Set(uv_coords) + # 添加自定义数据 + # 定义自定义数据 + customData = { + "dictionary Maya": { + "token name": "map1" + } + } + stAttr.SetCustomData(customDataDict) + # 修改插值方式 + # 修改插值方式的元数据 + stAttr.SetMetadata("interpolation", UsdGeom.Tokens.faceVarying) + + +def export_geometry(): + """ + Create USD file and record geometry data + """ + + # Create USD stage + usd_file_path = r"C:\Users\Jcen\Desktop\USD\temp22.usda" + + # Create USD stage and root object + stage = Usd.Stage.CreateNew(usd_file_path) + spherePrim = UsdGeom.Xform.Define(stage, '/pSphere1') + rootPrim = stage.GetPrimAtPath('/pSphere1') + stage.SetDefaultPrim(rootPrim) + + + UsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.y) + stage.SetMetadata('metersPerUnit', 0.01) + stage.GetRootLayer().documentation = "Generated from 
Composed Stage of pSphere1 layer" + + + root_xform = UsdGeom.Xform.Define(stage, '/') + + process_geometry(stage, spherePrim) + + # Save the USD stage to the file + stage.GetRootLayer().Save() + print(f'>> {usd_file_path}') + + +export_geometry() +# print(get_geometry_data(pm.ls(type='mesh')[0])) + + + + + + + + + + + + + +import os +import tempfile +from pathlib import Path + +import pymel.core as pm +from pxr import Kind, Sdf, Tf, Usd, UsdGeom, UsdShade + +USD_ROOT = Path("Z:/Test_USD") +COMPONENT_NAME = "my_component" + + +def extract_geo_proxy(src_usd_stage: Usd.Stage, dst_usd_file: Path): + src_root_prim = src_usd_stage.GetDefaultPrim() + src_proxy_prim = src_root_prim.GetPrimAtPath("geo/proxy") + + if not src_proxy_prim: + return + + dst_usd_stage = Usd.Stage.CreateInMemory() + dst_proxy_prim = dst_usd_stage.DefinePrim("/proxy", "Scope") + dst_usd_stage.SetDefaultPrim(dst_proxy_prim) + UsdGeom.SetStageMetersPerUnit(dst_usd_stage, UsdGeom.LinearUnits.centimeters) + UsdGeom.SetStageUpAxis(dst_usd_stage, UsdGeom.Tokens.y) + + UsdGeom.Imageable(dst_proxy_prim).GetPurposeAttr().Set(UsdGeom.Tokens.proxy) + + # reference to the source proxy prim and then flatten the stage + dst_proxy_prim.GetReferences().AddReference( + assetPath=src_usd_stage.GetRootLayer().realPath, + primPath=src_proxy_prim.GetPath(), + ) + + dst_usd_stage.Flatten().Export(dst_usd_file.as_posix()) + + +def extract_geo_render(src_usd_stage: Usd.Stage, dst_usd_file: Path): + src_root_prim = src_usd_stage.GetDefaultPrim() + src_render_prim = src_root_prim.GetPrimAtPath("geo/render") + + if not src_render_prim: + return + + dst_usd_stage = Usd.Stage.CreateInMemory() + dst_render_prim = dst_usd_stage.DefinePrim("/render", "Scope") + dst_usd_stage.SetDefaultPrim(dst_render_prim) + UsdGeom.SetStageMetersPerUnit(dst_usd_stage, UsdGeom.LinearUnits.centimeters) + UsdGeom.SetStageUpAxis(dst_usd_stage, UsdGeom.Tokens.y) + + UsdGeom.Imageable(dst_render_prim).GetPurposeAttr().Set(UsdGeom.Tokens.render) + + # reference to the source render prim and then flatten the stage + dst_render_prim.GetReferences().AddReference( + assetPath=src_usd_stage.GetRootLayer().realPath, + primPath=src_render_prim.GetPath(), + ) + + dst_usd_stage.Flatten().Export(dst_usd_file.as_posix()) + + +def extract_material(src_usd_stage: Usd.Stage, dst_usd_file: Path): + src_root_prim = src_usd_stage.GetDefaultPrim() + src_material_prim = src_root_prim.GetPrimAtPath("mtl") + + if not src_material_prim: + return + + dst_usd_stage = Usd.Stage.CreateInMemory() + dst_usd_stage.OverridePrim("/root") + dst_mlt_prim = dst_usd_stage.DefinePrim("/root/mtl", "Scope") + dst_usd_stage.SetDefaultPrim(dst_mlt_prim) + + # reference to the source material prim + dst_mlt_prim.GetReferences().AddReference( + assetPath=src_usd_stage.GetRootLayer().realPath, + primPath=src_material_prim.GetPath(), + ) + + # Iterate over all the prims under /root/geo and extract the material binding + for src_prim in Usd.PrimRange(src_root_prim.GetPrimAtPath("geo")): + if not src_prim.HasAPI(UsdShade.MaterialBindingAPI): + continue + + # get the bound material from the source prim + src_material_binding_api = UsdShade.MaterialBindingAPI(src_prim) + material, relationship = src_material_binding_api.ComputeBoundMaterial() + if not material: + continue + + # create override prim for the destination prim and set the material binding + dst_prim = dst_usd_stage.OverridePrim(src_prim.GetPath()) + dst_material_binding_api = UsdShade.MaterialBindingAPI(dst_prim) + dst_material_binding_api.Bind(material) + + 
dst_root_layer = dst_usd_stage.Flatten() + print(dst_root_layer.ExportToString()) + dst_root_layer.Export(dst_usd_file.as_posix()) + + +def construct_modelling_usd( + modelling_usd_file: Path, + proxy_usd_file: Path, + render_usd_file: Path, +): + usd_stage = Usd.Stage.CreateNew(modelling_usd_file.as_posix()) + + # create prims + root_prim = usd_stage.DefinePrim("/root", "Xform") + geo_prim = usd_stage.DefinePrim("/root/geo", "Scope") + proxy_prim = usd_stage.DefinePrim("/root/geo/proxy", "Scope") + render_prim = usd_stage.DefinePrim("/root/geo/render", "Scope") + + # set root prim metadata + model_api = Usd.ModelAPI(root_prim) + model_api.SetKind(Kind.Tokens.component) + root_prim.SetInstanceable(True) + + # use relative path to reference the proxy and render usd files + proxy_relative_path = os.path.relpath(proxy_usd_file, modelling_usd_file.parent) + proxy_prim.GetPayloads().SetPayloads([Sdf.Payload(proxy_relative_path)]) + + render_relative_path = os.path.relpath(render_usd_file, modelling_usd_file.parent) + render_prim.GetPayloads().SetPayloads([Sdf.Payload(render_relative_path)]) + + # set the metadata for the stage + usd_stage.SetDefaultPrim(root_prim) + UsdGeom.SetStageMetersPerUnit(usd_stage, UsdGeom.LinearUnits.centimeters) + UsdGeom.SetStageUpAxis(usd_stage, UsdGeom.Tokens.y) + + # save the stage + usd_stage.Save() + + +def construct_component_usd( + component_usd_file: Path, + modelling_usd_file: Path, + material_usd_file: Path, +): + usd_stage = Usd.Stage.CreateNew(component_usd_file.as_posix()) + + # add modelling usd file and material usd file as sublayers + sub_layer_paths = usd_stage.GetRootLayer().subLayerPaths + sub_layer_paths.append( + os.path.relpath(material_usd_file, component_usd_file.parent) + ) + sub_layer_paths.append( + os.path.relpath(modelling_usd_file, component_usd_file.parent) + ) + + usd_stage.Save() + + +def export_usd(dag_object): + # temp_dir = tempfile.mkdtemp(prefix="usd_export_") + # usd_file = os.path.join(temp_dir, "temp.usda") + usd_file = r"D:\Test\temp.usda" + + pm.mayaUSDExport( + file=usd_file, + exportRoots=dag_object, + defaultUSDFormat="usda", + jobContext="Arnold", + stripNamespaces=True, + materialsScopeName="mtl", + ) + + print(usd_file) + return usd_file + + +usd_file = export_usd(pm.selected()[0]) + +src_usd_stage = Usd.Stage.Open(usd_file) +extract_geo_proxy(src_usd_stage, Path(r"D:\Test\proxy.usda")) +extract_geo_render(src_usd_stage, Path(r"D:\Test\render.usda")) +extract_material(src_usd_stage, Path(r"D:\Test\material.usda")) +construct_modelling_usd( + Path("D:/Test/modelling.usda"), + Path("D:/Test/proxy.usda"), + Path("D:/Test/render.usda"), +) +construct_component_usd( + Path("D:/Test/component.usda"), + Path("D:/Test/modelling.usda"), + Path("D:/Test/material.usda"), +) + + + + + + + + + + + + + + + + + + + + + + + +def maya_export_usd_geo(path, startTime, endTime): + import maya.mel as mel + import os + mel.eval( + f'file -force -options ";exportUVs=1;exportSkels=none;exportSkin=none;exportBlendShapes=0;exportDisplayColor=0;;exportColorSets=1;exportComponentTags=1;defaultMeshScheme=catmullClark;animation=1;eulerFilter=0;staticSingleSample=0;startTime={startTime};endTime={endTime};frameStride=1;frameSample=0.0;defaultUSDFormat=usda;parentScope=;defaultPrim={};shadingMode=useRegistry;convertMaterialsTo=[UsdPreviewSurface];exportRelativeTextures=automatic;exportInstances=1;exportVisibility=1;mergeTransformAndShape=1;stripNamespaces=0;worldspace=0;excludeExportTypes=[]" -typ "USD Export" -pr -es "{path}";' + ) + os.rename(path, 
path + 'a') + + + + + + + + + + + + + +from pxr import Usd, UsdGeom, Sdf +import maya.cmds as cmds +import maya.mel as mel + +# 创建一个新的USD Stage +stage = Usd.Stage.CreateNew('C:/Users/Jcen/Desktop/USD/yourStage10.usda') + +# 在Maya中创建一个代表这个Stage的节点 +mayaUsdNode = cmds.createNode('mayaUsdProxyShape', name='MyUSDStage') +cmds.setAttr(f'{mayaUsdNode}.filePath', 'C:/Users/Jcen/Desktop/USD/yourStage10.usda', type='string') + +# 将sublayer文件添加到主Stage的层级中 +stage.GetRootLayer().subLayerPaths.append("C:/Users/Jcen/PycharmProjects/USD/assembly/cube.usda") +# 创建一个新的USD Stage +sublayer1 = Usd.Stage.CreateNew('C:/Users/Jcen/Desktop/USD/sublayer7.usda') + +stage.GetRootLayer().subLayerPaths.append('C:/Users/Jcen/Desktop/USD/sublayer7.usda') + +Sdf.Layer.FindOrOpen("C:/Users/Jcen/PycharmProjects/USD/assembly/cube.usda").SetPermissionToEdit(False) + +# 假设你已经知道要操作的层的名字或路径 +layer_path = 'C:/Users/Jcen/Desktop/USD/sublayer7.usda' + +# 使用MEL命令选中层 +# 注意:这里的命令可能需要根据实际的UI组件和层次结构进行调整 +mel.eval(f'''mayaUsdEditTarget -edit -editTarget "{layer_path}" "|mayaUsdProxy1|MyUSDStage";''') + + +# 添加一个Xform和一个Cube +# xformPath = Sdf.Path('/World') +# cubePath = xformPath.AppendChild('Cube') + +# xform = UsdGeom.Xform.Define(stage, xformPath) +# cube = UsdGeom.Cube.Define(stage, cubePath) + +# 设置Cube的属性 +# cube.GetSizeAttr().Set(1.0) + +# 保存Stage +stage.GetRootLayer().Save() + +# 刷新Maya视图以显示新的USD内容 +cmds.refresh() + + + + + + + + + + + + + + + + + +from pxr import Usd, UsdGeom, Sdf +import maya.cmds as cmds +import maya.mel as mel + +# 创建一个新的USD Stage +stage = Usd.Stage.CreateNew('C:/Users/Jcen/Desktop/USD/yourStage10.usda') + +# 在Maya中创建一个代表这个Stage的节点 +mayaUsdNode = cmds.createNode('mayaUsdProxyShape', name='MyUSDStage') +cmds.setAttr(f'{mayaUsdNode}.filePath', 'C:/Users/Jcen/Desktop/USD/yourStage10.usda', type='string') + +# 将sublayer文件添加到主Stage的层级中 +stage.GetRootLayer().subLayerPaths.append("C:/Users/Jcen/PycharmProjects/USD/assembly/cube.usda") +# 创建一个新的USD Stage +sublayer1 = Usd.Stage.CreateNew('C:/Users/Jcen/Desktop/USD/sublayer7.usda') + +stage.GetRootLayer().subLayerPaths.append('C:/Users/Jcen/Desktop/USD/sublayer7.usda') + +Sdf.Layer.FindOrOpen("C:/Users/Jcen/PycharmProjects/USD/assembly/cube.usda").SetPermissionToEdit(False) + +# 假设你已经知道要操作的层的名字或路径 +layer_path = 'C:/Users/Jcen/Desktop/USD/sublayer7.usda' + +# 使用MEL命令选中层 +# 注意:这里的命令可能需要根据实际的UI组件和层次结构进行调整 +mel.eval(f'''mayaUsdEditTarget -edit -editTarget "{layer_path}" "|mayaUsdProxy1|MyUSDStage";''') + + +# 添加一个Xform和一个Cube +# xformPath = Sdf.Path('/World') +# cubePath = xformPath.AppendChild('Cube') + +# xform = UsdGeom.Xform.Define(stage, xformPath) +# cube = UsdGeom.Cube.Define(stage, cubePath) + +# 设置Cube的属性 +# cube.GetSizeAttr().Set(1.0) + +# 保存Stage +stage.GetRootLayer().Save() + +# 刷新Maya视图以显示新的USD内容 +cmds.refresh() + + + + + + + + + + + + + + + + + + + + +from pxr import Usd, UsdGeom, Sdf +import maya.cmds as cmds +import maya.mel as mel + +path = 'C:/Users/Jcen/Desktop/USD/yourStage15.usda' +main = "C:/Users/Jcen/PycharmProjects/USD/assembly/cube.usda" +edit = 'C:/Users/Jcen/Desktop/USD/sublayer13.usda' + +# 创建一个新的USD Stage +stage = Usd.Stage.CreateNew(path) + +# 在Maya中创建一个代表这个Stage的节点 +mayaUsdNode = cmds.createNode('mayaUsdProxyShape', name='MyUSDStage') +cmds.setAttr(f'{mayaUsdNode}.filePath', path, type='string') + +# 将sublayer文件添加到主Stage的层级中 +# workingLayer = Sdf.Layer.FindOrOpen(main) +# stage.SetEditTarget(workingLayer) + +# workingLayer1 = Sdf.Layer.FindOrOpen("C:/Users/Jcen/Desktop/USD/sublayer8.usda") +# stage.SetEditTarget(workingLayer1) + 
+stage.GetRootLayer().subLayerPaths.append(main) +# 创建一个新的USD Stage +sublayer1 = Usd.Stage.CreateNew(edit) +sublayer1.GetRootLayer().Save() +stage.GetRootLayer().subLayerPaths.append(edit) + +Sdf.Layer.FindOrOpen(main).SetPermissionToEdit(False) + +Sdf.Layer.FindOrOpen(path).SetPermissionToEdit(False) +# 假设你已经知道要操作的层的名字或路径 +layer_path = edit +# 使用MEL命令选中层 +# 注意:这里的命令可能需要根据实际的UI组件和层次结构进行调整 +mel.eval(f'''mayaUsdEditTarget -edit -editTarget "{layer_path}" "|mayaUsdProxy1|MyUSDStage";''') + + +# 添加一个Xform和一个Cube +# xformPath = Sdf.Path('/World') +# cubePath = xformPath.AppendChild('Cube') + +# xform = UsdGeom.Xform.Define(stage, xformPath) +# cube = UsdGeom.Cube.Define(stage, cubePath) + +# 设置Cube的属性 +# cube.GetSizeAttr().Set(1.0) + +# 保存Stage +# stage.GetRootLayer().Save() + +# 刷新Maya视图以显示新的USD内容 +cmds.refresh() + + + + + + + + + + + + + + + + + + + + + + + +from pxr import Usd, UsdGeom, Sdf + + + +stage = Usd.Stage.Open(r"C:\Users\Jcen\Desktop\test11111.usd") + +rootPrim = stage.GetPrimAtPath('/pCube3') + +stAttr = rootPrim.GetPrim().CreateAttribute("primvars:arnold:visibility:camera", Sdf.ValueTypeNames.Bool, False) +stAttr.Set(False) + +stage.GetRootLayer().Save() + + + +import maya.app.renderSetup.model.renderSetup as renderSetup + +rs = renderSetup.instance() + +selectedLayer = rs.getVisibleRenderLayer() + +print("当前选择的 Render Setup 层是:", selectedLayer.name()) + + +import maya.cmds as cmds + +def customCallback(): + print("Custom callback function executed before rendering") + # 在这里可以添加你想要在渲染前执行的操作 + +# 注册渲染前的回调函数 +jobNum = cmds.scriptJob(event=["renderLayerManagerChange", customCallback], protected=True) + +# 在这里可以添加其他需要执行的操作 + +# 删除回调函数 +# cmds.scriptJob(kill=jobNum) + + +import ufe + +for i in range(100): + print(i) + + +globalSelection = ufe.GlobalSelection.get() + +print(globalSelection) +tt = globalSelection.front() +print(tt.path()) + +import mayaUsd.ufe +stage = mayaUsd.ufe.getStage(str(tt.path()).split('/')[0]) +print(stage) +s = stage.GetRootLayer().realPath + +print(s) + +python("exec(open('C:/Users/Jcen/PycharmProjects/USD/renderseting/tstmel.py').read())"); + + + + + + + + + + + + + +from pxr import Usd, Sdf + +# 创建源Stage并添加一个Prim +source_stage = Usd.Stage.CreateNew("source.usda") +source_prim = source_stage.DefinePrim("/SourcePrim", "Xform") +source_prim.CreateAttribute("exampleAttr", Sdf.ValueTypeNames.String).Set("Hello, USD") + +# 创建新的Stage +new_stage = Usd.Stage.CreateNew("target.usda") + +# 获取源Stage和新Stage的根Layer +source_layer = source_stage.GetRootLayer() +target_layer = new_stage.GetRootLayer() + +# 复制Spec从源Layer到目标Layer +if Sdf.CopySpec(source_layer, source_prim.GetPath(), target_layer, "/TargetPrim"): + print("复制成功") +else: + print("复制失败") + +# 保存新Stage +new_stage.GetRootLayer().Save() \ No newline at end of file diff --git a/医学.txt b/医学.txt new file mode 100644 index 0000000..8a122d6 --- /dev/null +++ b/医学.txt @@ -0,0 +1 @@ +肿瘤医学、康复医学、营养医学、运动医学 \ No newline at end of file diff --git a/安装alien.txt b/安装alien.txt new file mode 100644 index 0000000..a969f5e --- /dev/null +++ b/安装alien.txt @@ -0,0 +1,5 @@ +sudo apt install debugedit=4.14.2.1+dfsg1.1-1+dde + +sudo apt install rpm + +sudo apt install alien \ No newline at end of file
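
A minimal usage sketch to go with the install commands above (the file name foo.rpm is a hypothetical placeholder): once alien is installed, it can convert an RPM package into a .deb that dpkg can install.

# Convert the RPM to a .deb; --scripts also converts any pre/post install scripts
sudo alien --to-deb --scripts foo.rpm

# Install the generated .deb (its name is derived from the RPM metadata)
sudo dpkg -i foo*.deb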