笔记.js 12 KB

  (extraction artifact: the original file's line numbers 1–371 were fused into one run here; residue removed)
  1. datasets中的orientation为模型整体绕z轴的旋转角度, 初始为0
  2. filter中的:
  3. 数据集校准后不变的值有:
  4. dataset_orientation( 永远存储最初始的点位的quaternion, 在旋转时也不变, 因此它等于没有旋转时的orientation)-- --image.datasetOrientation
  5. dataset_floor_orientation( 一般和dataset_orientation值一样)
  6. dataset_location 真实的三维坐标
  7. dataset_floor_location
  8. 数据集校准后改变的值有:
  9. orientation —— image.orientation(在旋转时实时变, 且是根据模型旋转度数和dataset_orientation来算的, 所以如果dataset_orientation不对, 就会算错。)
  10. location-- --image.location xy为经纬度
  11. floor_location
  12. ------------------------------------------------
  13. 查看全局:
  14. var view = window.IV.getMainView()
  15. view.currentImage.id
  16. view.ImageService.images
  17. POI: 热点 PoiService PoiEditorDirective PoiEntity
  18. t.prototype.isPreviewMenuVisible —— canDisplayResultDetails —— PoiService.openedPoi - setOpenedPoi
  19. MeasurementLineMaterial: 测量线材质, 有蓝色标准实线和灰色透明虚线两种状态
  20. 数据集校准 saveAlignment = selectedDatasets
  21. this.underlayScene.children[3] 包含32个子mesh, 是全景图sphere 其材质fragment在下方
  22. overlayScene 里有marker, name: "location"?
  23. QuaternionFactory VectorFactory
  24. 加载深度图loadDepthImage 获取深度值getDepth( 用于更新reticule位置)。 深度图用于修改全景图sphere的gl_FragDepthEXT
  25. getCoordinates doPointCloudPicking doDepthImagePicking
  26. t.NORMAL = "normal",
  27. t.DATASET_ALIGNMENT = "datasetAlignment",
  28. t.GEO_REGISTRATION = "GeoRegistration",
  29. t.SITE_MODEL_EDITOR = "SiteModelEditor",
  30. t.NAV_GRAPH_EDITOR = "NavGraphEditor",
  31. t.DOWNLOAD_POINT_CLOUD = "DownloadPointCloud",
  32. t.MEASUREMENTS = "Measurements"
  33. //--关于地图和 地图上的图片-------关键词mapSizeM
  34. updateSubTiles更新地图tile, 如果不存在就加载
  35. //图片上传https://testlaser.4dkankan.com/maxkk/t-iksBApb/locat/addDataSet.html
  // Same as IndoorViewerAPI's QuaternionFactory.toArray.
  var QuaternionFactory = {
      // Converts a THREE.Quaternion to the [w, x, y, z] array layout used by the API.
      // The viewer applies a -90° twist around Z on import, so we undo it on export.
      toArray: function(quaternion) {
          // -90° rotation around the Z axis, applied when importing into the viewer.
          var importTwist = (new THREE.Quaternion).setFromAxisAngle(new THREE.Vector3(0, 0, 1), THREE.Math.degToRad(-90));
          // Inverse twist: rotates back by +90° on the way out.
          var exportTwist = importTwist.clone().inverse();
          var untwisted = quaternion.clone().multiply(exportTwist);
          var c = untwisted.toArray(); // THREE order: [x, y, z, w]
          // Reorder to the API's [w, x, y, z].
          return [c[3], c[0], c[1], c[2]];
      }
  };
  // Get rotation: converts a Z-axis rotation angle (0–360 degrees) into the
  // API's [w, x, y, z] quaternion array. The angle is negated, i.e. positive
  // input rotates clockwise around +Z.
  var getQuaternion = function(angle) {
      var euler = new THREE.Euler(0, 0, THREE.Math.degToRad(-angle));
      var q = new THREE.Quaternion().setFromEuler(euler);
      return QuaternionFactory.toArray(q);
  };
  // Get scale: maps an image's pixel width plus a uniform scale factor (x == y)
  // to a size on the map, in meters.
  var getSize = function(imgWidth, scale) {
      // 95.54628610610962 = 38.21851444244385 * (2 + 0.5), where
      // 38.21851444244385 = mapSizeM / 2^maxDepth = 40075017 / 2^20 —
      // presumably the width of a single tile at zoom level 20 (TODO confirm).
      // The 0.5 was found empirically: the image layer uses bias = 0.5, purpose
      // unknown. It might really be *256/100 rather than *2.5 — no precise test yet.
      // NOTE(review): one observed failure with a 2048px image rendered double
      // size — the upload page's canvasFunction(extent, scale) produced only a
      // 1024px canvas at scale 0.1; a reload fixed it, so the upload page is the
      // likely culprit, not this formula.
      var baseWidthM = 95.54628610610962;
      var level = imgWidth / 1024; // width in multiples of the 1024px baseline
      return baseWidthM * level * scale;
  };
  59. //位置直接使用中心点的经纬度
  60. //-------------------------------------------------
  61. ========================
  62. ======== shader ========
  63. 全景图 fragment
  // Panorama-sphere fragment shader: samples the panorama texture and, when the
  // frag-depth extension is available, overwrites gl_FragDepthEXT with a depth
  // reconstructed from the depth map so the panorama occludes 3D content correctly.
  uniform sampler2D map;     // panorama color texture
  uniform float opacity;
  varying vec2 vUv;
  #ifdef USE_ALPHAMAP
  uniform sampler2D alphaMap;
  #endif
  #ifdef GL_EXT_frag_depth
  uniform sampler2D depthMap;              // per-pixel distance, encoded in r/g
  uniform mat4 inverseProjectionMatrix;
  uniform mat4 projectionMatrix;
  uniform vec4 viewport;                   // xy = offset, zw = size
  #endif
  void main() {
      vec4 color = texture2D(map, vUv);
      float alpha = opacity;
  #ifdef USE_ALPHAMAP
      alpha *= texture2D(alphaMap, vUv).g;
  #endif
      gl_FragColor = vec4(color.r, color.g, color.b, alpha);
  #ifdef GL_EXT_frag_depth
      /*
       * Useful resources:
       *
       * https://www.khronos.org/opengl/wiki/Vertex_Post-Processing#Viewport_transform
       *   Clipping, perspective divide, viewport transform
       *
       * https://www.khronos.org/opengl/wiki/Compute_eye_space_from_window_space
       *   From window (viewport) space back to eye space in GLSL
       *
       * https://www.khronos.org/opengl/wiki/Vertex_Transformation
       *   Summary of transformations object -> world -> eye -> clip -> NDC -> window
       *
       * http://slideplayer.com/slide/6837153/#
       *   Overview presentation
       *
       * http://www.shaderific.com/glsl-variables/
       *   GLSL built-in variables
       */
      vec4 depth = texture2D(depthMap, vUv);
      // Original encoding (kept for reference):
      //float distance = depth.r + 256. * (depth.g + 256. * depth.b);
      //distance *= 255. * .001; // distance is now in meters
      // Changed encoding: high byte in g, low byte in r (i.e. r[1] + r[0] / 256).
      // NOTE(review): unclear why the factor is 255 — confirm against the
      // depth-map encoder.
      float distance = (depth.g + depth.r / 256.) * 255.;
      // Rebuild this fragment's NDC position from window coordinates ...
      vec4 ndcPos;
      ndcPos.xy = ((2.0 * gl_FragCoord.xy) - (2.0 * viewport.xy)) / (viewport.zw) - 1.;
      ndcPos.z = (2.0 * gl_FragCoord.z - gl_DepthRange.near - gl_DepthRange.far) /
          (gl_DepthRange.far - gl_DepthRange.near);
      ndcPos.w = 1.0;
      // ... undo the perspective divide and unproject back to eye space.
      vec4 clipPos = ndcPos / gl_FragCoord.w;
      vec4 eyePos = inverseProjectionMatrix * clipPos;
      distance += .1; // add a safety margin
      // Push the eye-space point out along its view ray to the measured distance,
      // then reproject to obtain the replacement depth value.
      vec4 eyePos2 = vec4(normalize(eyePos.xyz) * distance, 1.);
      vec4 clipPos2 = projectionMatrix * eyePos2;
      vec4 ndcPos2 = clipPos2 * 1. / clipPos2.w;
      gl_FragDepthEXT = 0.5 * ((gl_DepthRange.far - gl_DepthRange.near) * ndcPos2.z +
          gl_DepthRange.near + gl_DepthRange.far);
  #endif
  }
  127. --------
  128. MeasurementLineMaterial vertex
  // MeasurementLineMaterial vertex shader.
  // This vertex shader is a copy of the one supplied by MeshLineMaterial.
  // It supports drawing dashed lines.
  attribute vec3 previous;   // position of the previous vertex on the line
  attribute vec3 next;       // position of the next vertex on the line
  attribute float side;      // -1/+1: side of the centerline this vertex extrudes to
  attribute float width;     // per-vertex width multiplier
  attribute float counters;  // position along the line, used for dashing
  uniform vec2 resolution;
  uniform float lineWidth;
  uniform vec3 color;
  uniform float opacity;
  uniform float near;
  uniform float far;
  uniform float sizeAttenuation;
  uniform vec3 dashColor;
  uniform float dashOpacity;
  varying vec2 vUV;
  varying vec4 vColor;
  varying vec4 vDashColor;
  varying float vCounters;
  // Projects a clip-space position to aspect-corrected 2D coordinates.
  // Also forwards `counters` to the fragment stage as a side effect.
  vec2 fix(vec4 i, float aspect) {
      vec2 res = i.xy / i.w;
      res.x *= aspect;
      vCounters = counters;
      return res;
  }
  void main() {
      float aspect = resolution.x / resolution.y;
      float pixelWidthRatio = 1.0 / (resolution.x * projectionMatrix[0][0]);
      vColor = vec4(color, opacity);
      vDashColor = vec4(dashColor, dashOpacity);
      vUV = uv;
      mat4 m = projectionMatrix * modelViewMatrix;
      vec4 finalPosition = m * vec4(position, 1.0);
      vec4 prevPos = m * vec4(previous, 1.0);
      vec4 nextPos = m * vec4(next, 1.0);
      vec2 currentP = fix(finalPosition, aspect);
      vec2 prevP = fix(prevPos, aspect);
      vec2 nextP = fix(nextPos, aspect);
      // Screen-space width; with sizeAttenuation the width ignores distance.
      float pixelWidth = finalPosition.w * pixelWidthRatio;
      float w = 1.8 * pixelWidth * lineWidth * width;
      if (sizeAttenuation == 1.0) {
          w = 1.8 * lineWidth * width;
      }
      // Line direction at this vertex: one-sided at the endpoints, averaged at joints.
      vec2 dir;
      if (nextP == currentP) {
          dir = normalize(currentP - prevP);
      } else if (prevP == currentP) {
          dir = normalize(nextP - currentP);
      } else {
          vec2 dir1 = normalize(currentP - prevP);
          vec2 dir2 = normalize(nextP - currentP);
          dir = normalize(dir1 + dir2);
          // NOTE(review): perp and miter are computed but never used — dead code
          // inherited from MeshLineMaterial's shader.
          vec2 perp = vec2(-dir1.y, dir1.x);
          vec2 miter = vec2(-dir.y, dir.x);
      }
      // Extrude perpendicular to the line direction to give the line its width.
      vec2 normal = vec2(-dir.y, dir.x);
      normal.x /= aspect;
      normal *= .5 * w;
      vec4 offset = vec4(normal * side, 0.0, 1.0);
      finalPosition.xy += offset.xy;
      gl_Position = finalPosition;
  }
  192. --------
  193. MeasurementLineMaterial fragment
  // MeasurementLineMaterial fragment shader.
  // Draws the solid line color where the line is visible and blends toward the
  // dashed color (and eventually full transparency) where the line lies behind
  // the scene depth sampled from depthTexture.
  uniform sampler2D map;
  uniform sampler2D alphaMap;
  uniform float useMap;
  uniform float useAlphaMap;
  uniform float useDash;
  uniform float dashArray;
  uniform float dashOffset;
  uniform float dashRatio;
  uniform float visibility;
  uniform float alphaTest;
  uniform vec2 repeat;
  uniform sampler2D depthTexture;    // scene depth buffer
  uniform sampler2D rgbaTexture;
  uniform float nearPlane;
  uniform float farPlane;
  uniform float occlusionDistance;   // depth-delta range for solid->dashed blending
  uniform float clipDistance;        // depth-delta range for fading out entirely
  uniform vec2 viewportSize;
  uniform vec2 viewportOffset;
  varying vec2 vUV;
  varying vec4 vColor;
  varying vec4 vDashColor;
  varying float vCounters;
  // Converts the exponential depth value from the depth buffer to a linear value.
  // See https://learnopengl.com/Advanced-OpenGL/Depth-testing for more
  // information about this formula.
  float convertToLinear(float zValue) {
      float z = zValue * 2.0 - 1.0;
      return (2.0 * nearPlane * farPlane) / (farPlane + nearPlane - z * (farPlane - nearPlane));
  }
  void main() {
      vec4 c = vDashColor;
      // <-- The following section of the shader is copied from MeshLineMaterial
      // Sample the fragment from a texture if such is supplied
      if (useMap == 1.0) {
          c *= texture2D(map, vUV * repeat);
      }
      // Sample the fragment's alpha value from an alpha texture if such is supplied
      if (useAlphaMap == 1.0) {
          c.a *= texture2D(alphaMap, vUV * repeat).a;
      }
      // Discard the fragment if below the alpha threshold
      if (c.a < alphaTest) {
          discard;
      }
      // If the line is dashed, set the alpha value of the fragment according
      // to the line segment it belongs to
      if (useDash == 1.0) {
          c.a *= ceil(mod(vCounters + dashOffset, dashArray) - (dashArray * dashRatio));
      }
      // <-- end of copied code
  #ifdef GL_EXT_frag_depth
      // mixFactor and clipFactor define the color mixing proportion between the
      // states of full visibility and occluded visibility, and
      // full visibility and total invisibility.
      float mixFactor = 0.0;
      float clipFactor = 0.0;
      // The linear depth value of the current fragment
      float fragDepth = convertToLinear(gl_FragCoord.z);
      // The coordinates of the current fragment in the depth texture
      vec2 depthTxtCoords = vec2(gl_FragCoord.x - viewportOffset.x, gl_FragCoord.y) / viewportSize;
      // The linear depth value of the pixel occupied by this fragment in the depth buffer
      float textureDepth = convertToLinear(texture2D(depthTexture, depthTxtCoords).r);
      // The difference between the two depths
      float delta = textureDepth - fragDepth;
      if (delta < 0.0) {
          // occlusionDistance and clipDistance define the width of the respective
          // zones and mixFactor/clipFactor express the interpolation between the
          // two colors depending on the position of the current fragment within
          // those zones.
          mixFactor = clamp(delta / occlusionDistance, 0.0, 1.0);
          clipFactor = clamp(delta / clipDistance, 0.0, 1.0);
      }
      // If the fragment is totally transparent, don't bother drawing it
      if (clipFactor == 1.0) {
          discard;
      }
  #else
      // Without the frag-depth extension there is no occlusion information:
      // keep the line fully solid and fully opaque.
      float mixFactor = 0.0;
      float clipFactor = 0.0;
  #endif
      // Calculate the color of the dashed version of the line
      vec4 backColor = vec4(c.rgb, c.a * step(vCounters, visibility));
      // Mix between the solid and the dashed versions of the line according to the mixFactor
      gl_FragColor = mix(vColor, backColor, mixFactor);
      // Set the alpha value of the fragment according to the clipFactor
      // Note that clipFactor was previously clamped [0.0;1.0]
      gl_FragColor.a *= (1.0 - clipFactor);
  }