.. SPDX-License-Identifier: GPL-2.0

======================
Hyper-V network driver
======================

Compatibility
=============

This driver is compatible with Windows Server 2012 R2, 2016 and
Windows 10.

Features
========

Checksum offload
----------------
  The netvsc driver supports checksum offload as long as the
  Hyper-V host version does. Windows Server 2016 and Azure
  support checksum offload for TCP and UDP for both IPv4 and
  IPv6.
  Windows Server 2012 only supports checksum offload for TCP.
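
  For example, the negotiated checksum offload state can be inspected and
  toggled from within the guest with ethtool (a sketch; the interface name
  eth0 is illustrative)::

        ethtool -k eth0 | grep checksumming
        ethtool -K eth0 tx on rx on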

Receive Side Scaling
--------------------
  Hyper-V supports receive side scaling. For TCP & UDP, packets can
  be distributed among available queues based on IP address and port
  number.

  For TCP & UDP, we can switch hash level between L3 and L4 by ethtool
  command. TCP/UDP over IPv4 and v6 can be set differently. The default
  hash level is L4. We currently only allow switching TX hash level
  from within the guests.

  On Azure, fragmented UDP packets have high loss rate with L4
  hashing. Using L3 hashing is recommended in this case.

  For example, for UDP over IPv4 on eth0:

  To include UDP port numbers in hashing::

        ethtool -N eth0 rx-flow-hash udp4 sdfn

  To exclude UDP port numbers in hashing::

        ethtool -N eth0 rx-flow-hash udp4 sd

  To show UDP hash level::

        ethtool -n eth0 rx-flow-hash udp4

Generic Receive Offload, aka GRO
--------------------------------
  The driver supports GRO and it is enabled by default. GRO coalesces
  like packets and significantly reduces CPU usage under heavy Rx
  load.
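
  GRO can be verified or toggled at runtime with ethtool (a sketch; eth0 is
  an illustrative interface name)::

        ethtool -k eth0 | grep generic-receive-offload
        ethtool -K eth0 gro off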

Large Receive Offload (LRO), or Receive Side Coalescing (RSC)
--------------------------------------------------------------
  The driver supports LRO/RSC in the vSwitch feature. It reduces the per
  packet processing overhead by coalescing multiple TCP segments when
  possible. The feature is enabled by default on VMs running on Windows
  Server 2019 and later. It may be changed by ethtool command::

        ethtool -K eth0 lro on
        ethtool -K eth0 lro off

SR-IOV support
--------------
  Hyper-V supports SR-IOV as a hardware acceleration option. If SR-IOV
  is enabled in both the vSwitch and the guest configuration, then the
  Virtual Function (VF) device is passed to the guest as a PCI
  device. In this case, both a synthetic (netvsc) and VF device are
  visible in the guest OS and both NICs have the same MAC address.

  The VF is enslaved by the netvsc device. The netvsc driver will
  transparently switch the data path to the VF when it is available and up.
  Network state (addresses, firewall, etc) should be applied only to the
  netvsc device; the slave device should not be accessed directly in most
  cases. The exceptions are when some special queue discipline or flow
  direction is desired; these should be applied directly to the VF slave
  device, as in the example below.
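
  For instance, a queueing discipline can be attached directly to the VF
  slave (a sketch; the VF interface name enP1p0s2 is hypothetical and will
  differ per system)::

        tc qdisc replace dev enP1p0s2 root fq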

Receive Buffer
--------------
  Packets are received into a receive area which is created when the device
  is probed. The receive area is broken into MTU sized chunks and each may
  contain one or more packets. The number of receive sections may be changed
  via ethtool Rx ring parameters (see the example at the end of this
  section).

  There is a similar send buffer which is used to aggregate packets for
  sending. The send area is broken into chunks, typically of 6144 bytes, and
  each section may contain one or more packets. Small packets are usually
  transmitted via copy to the send buffer. However, if the buffer is
  temporarily exhausted, or the packet to be transmitted is an LSO packet,
  the driver will provide the host with pointers to the data from the SKB.
  This attempts to achieve a balance between the overhead of data copy and
  the impact of remapping VM memory to be accessible by the host.
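
  For example, the ring parameters can be queried and the Rx ring resized
  with ethtool (a sketch; the interface name and the value 1024 are
  illustrative)::

        ethtool -g eth0
        ethtool -G eth0 rx 1024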

XDP support
-----------
  XDP (eXpress Data Path) is a feature that runs eBPF bytecode at the early
  stage when packets arrive at a NIC card. The goal is to increase
  performance for packet processing, reducing the overhead of SKB allocation
  and other upper network layers.

  hv_netvsc supports XDP in native mode, and transparently sets the XDP
  program on the associated VF NIC as well.

  Setting / unsetting an XDP program on the synthetic NIC (netvsc) propagates
  to the VF NIC automatically. Setting / unsetting an XDP program on the VF
  NIC directly is not recommended; it is also not propagated to the synthetic
  NIC and may be overwritten by the setting on the synthetic NIC.

  An XDP program cannot run with LRO (RSC) enabled, so you need to disable
  LRO before running XDP::

        ethtool -K eth0 lro off

  XDP_REDIRECT action is not yet supported.
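
  As an illustration, a compiled XDP object can be attached to and detached
  from the synthetic NIC in native (driver) mode with iproute2 (a sketch;
  the object file xdp_prog.o and the section name xdp are hypothetical)::

        ip link set dev eth0 xdpdrv obj xdp_prog.o sec xdp
        ip link set dev eth0 xdpdrv off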