Posts

1 Reply
6 Views
The following is a UIKit app with a collection view with one section, whose supplementary header view sports a search bar. When you type in it, the search bar often resigns first responder.

import UIKit

class ViewController: UIViewController {
    let words = ["foo", "bar"]
    var filteredWords = ["foo", "bar"] {
        didSet {
            dataSource.apply(self.snapshot)
        }
    }
    var collectionView: UICollectionView!
    var dataSource: UICollectionViewDiffableDataSource<String, String>!
    var snapshot: NSDiffableDataSourceSnapshot<String, String> {
        var snapshot = NSDiffableDataSourceSnapshot<String, String>()
        snapshot.appendSections(["main"])
        snapshot.appendItems(filteredWords)
        return snapshot
    }

    override func viewDidLoad() {
        super.viewDidLoad()
        navigationItem.rightBarButtonItem = .init(title: "Apply", style: .plain, target: self, action: #selector(apply))
        configureHierarchy()
        configureDataSource()
    }

    @objc func apply() {
        dataSource.apply(self.snapshot)
    }

    func configureHierarchy() {
        collectionView = .init(frame: .zero, collectionViewLayout: createLayout())
        view.addSubview(collectionView)
        collectionView.frame = view.bounds
        collectionView.autoresizingMask = [.flexibleWidth, .flexibleHeight]
    }

    func createLayout() -> UICollectionViewLayout {
        UICollectionViewCompositionalLayout { section, layoutEnvironment in
            var config = UICollectionLayoutListConfiguration(appearance: .insetGrouped)
            config.headerMode = .supplementary
            return NSCollectionLayoutSection.list(using: config, layoutEnvironment: layoutEnvironment)
        }
    }

    func configureDataSource() {
        let cellRegistration = UICollectionView.CellRegistration<UICollectionViewListCell, String> { _, _, _ in }
        dataSource = .init(collectionView: collectionView) { collectionView, indexPath, itemIdentifier in
            collectionView.dequeueConfiguredReusableCell(using: cellRegistration, for: indexPath, item: itemIdentifier)
        }
        let searchbarHeaderRegistration = UICollectionView.SupplementaryRegistration<SearchBarCell>(elementKind: UICollectionView.elementKindSectionHeader) { cell, elementKind, indexPath in
            cell.searchBar.delegate = self
        }
        dataSource.supplementaryViewProvider = { collectionView, kind, indexPath in
            collectionView.dequeueConfiguredReusableSupplementary(using: searchbarHeaderRegistration, for: indexPath)
        }
        dataSource.apply(self.snapshot, animatingDifferences: false)
    }
}

extension ViewController: UISearchBarDelegate {
    func searchBar(_ searchBar: UISearchBar, textDidChange searchText: String) {
        DispatchQueue.main.async { [weak self] in
            guard let self else { return }
            filteredWords = words.filter { $0.hasPrefix(searchText) }
        }
    }

    func searchBarTextDidEndEditing(_ searchBar: UISearchBar) {
        DispatchQueue.main.async { [weak self] in
            guard let self else { return }
            filteredWords = words
        }
    }
}

class SearchBarCell: UICollectionViewListCell {
    let searchBar = UISearchBar()

    override init(frame: CGRect) {
        super.init(frame: frame)
        contentView.addSubview(searchBar)
        searchBar.pinToSuperview()
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
}

extension UIView {
    func pin(
        to object: CanBePinnedTo,
        top: CGFloat = 0, bottom: CGFloat = 0, leading: CGFloat = 0, trailing: CGFloat = 0
    ) {
        self.translatesAutoresizingMaskIntoConstraints = false
        NSLayoutConstraint.activate([
            self.topAnchor.constraint(equalTo: object.topAnchor, constant: top),
            self.bottomAnchor.constraint(equalTo: object.bottomAnchor, constant: bottom),
            self.leadingAnchor.constraint(equalTo: object.leadingAnchor, constant: leading),
            self.trailingAnchor.constraint(equalTo: object.trailingAnchor, constant: trailing),
        ])
    }

    func pinToSuperview(
        top: CGFloat = 0, bottom: CGFloat = 0, leading: CGFloat = 0, trailing: CGFloat = 0,
        file: StaticString = #file, line: UInt = #line
    ) {
        guard let superview = self.superview else {
            print(">> \(#function) failed in file: \(String.localFilePath(from: file)), at line: \(line): could not find \(Self.self).superView.")
            return
        }
        self.pin(to: superview, top: top, bottom: bottom, leading: leading, trailing: trailing)
    }

    func pinToSuperview(constant c: CGFloat = 0, file: StaticString = #file, line: UInt = #line) {
        self.pinToSuperview(top: c, bottom: -c, leading: c, trailing: -c, file: file, line: line)
    }
}

@MainActor protocol CanBePinnedTo {
    var topAnchor: NSLayoutYAxisAnchor { get }
    var bottomAnchor: NSLayoutYAxisAnchor { get }
    var leadingAnchor: NSLayoutXAxisAnchor { get }
    var trailingAnchor: NSLayoutXAxisAnchor { get }
}

extension UIView: CanBePinnedTo { }
extension UILayoutGuide: CanBePinnedTo { }

extension String {
    static func localFilePath(from fullFilePath: StaticString = #file) -> Self {
        URL(fileURLWithPath: "\(fullFilePath)").lastPathComponent
    }
}

By the way, if you remove the dispatch blocks, the app simply crashes, complaining that you're trying to apply data source snapshots from both the main queue and other queues.

Xcode 15.3, iOS 17.4 simulator, MacBook Air M1 8GB, macOS Sonoma 14.4.1
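For anyone hitting the same thing, here is a minimal sketch (not a confirmed fix) of one pattern sometimes used when a snapshot update tears down the header that hosts the search bar: reassert first responder in the apply completion. It assumes the methods below live inside the ViewController from the post and that the header sits at section 0.

// Sketch only: if applying the snapshot recreates the header, first responder
// can be restored once the update has finished.
func applyPreservingSearchFocus() {
    let wasEditing = currentSearchBar()?.isFirstResponder ?? false
    dataSource.apply(snapshot, animatingDifferences: true) { [weak self] in
        guard wasEditing else { return }
        self?.currentSearchBar()?.becomeFirstResponder()
    }
}

// Hypothetical helper: look up the visible header's search bar (assumes section 0).
private func currentSearchBar() -> UISearchBar? {
    let headerPath = IndexPath(item: 0, section: 0)
    let header = collectionView.supplementaryView(
        forElementKind: UICollectionView.elementKindSectionHeader,
        at: headerPath) as? SearchBarCell
    return header?.searchBar
}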
1 Reply
8 Views
Hi everyone, I have an app already live on App Store Connect and I am looking for a way to upload a batch of in-app purchase items (CSV file or similar). Can you help? Thank you, Federico
2 Replies
15 Views
As an exercise in learning Swift, I rewrote a toy C++ command line tool in Swift. After switching to an UnsafeRawBufferPointer in a critical part of the code, the Release build of the Swift version was a little faster than the Release build of the C++ version. But the Debug build took around 700 times as long. I expect a Debug build to be somewhat slower, but by that much?

Here's the critical part of the code, a function that gets called many thousands of times. The two string parameters are always 5-letter words in plain ASCII (it's related to Wordle). By the way, if I change the loop ranges from 0..<5 to [0,1,2,3,4], then it runs about twice as fast in Debug, but twice as slow in Release.

func Score( trial: String, target: String ) -> Int {
    var score = 0
    withUnsafeBytes(of: trial.utf8) { rawTrial in
        withUnsafeBytes(of: target.utf8) { rawTarget in
            for i in 0..<5 {
                let trial_i = rawTrial[i];
                if trial_i == rawTarget[i] // strong hit
                {
                    score += kStrongScore
                }
                else // check for weak hit
                {
                    for j in 0..<5 {
                        if j != i {
                            let target_j = rawTarget[j];
                            if (trial_i == target_j) && (rawTrial[j] != target_j) {
                                score += kWeakScore
                                break
                            }
                        }
                    }
                }
            }
        }
    }
    return score
}
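Not an answer to the Debug/Release gap itself, but for comparison, here is a sketch of the same loop written against the string's contiguous UTF-8 storage; whether it narrows the gap is an assumption to be measured, not a claim.

// Sketch for comparison only (kStrongScore / kWeakScore are the constants from
// the original post). withContiguousStorageIfAvailable hands the closure a typed
// UnsafeBufferPointer<UInt8> over the UTF-8 bytes; for native Swift strings this
// storage is normally available, and the closures simply don't run if it isn't.
func scoreContiguous(trial: String, target: String) -> Int {
    var score = 0
    _ = trial.utf8.withContiguousStorageIfAvailable { rawTrial in
        _ = target.utf8.withContiguousStorageIfAvailable { rawTarget in
            for i in 0..<5 {
                let trialByte = rawTrial[i]
                if trialByte == rawTarget[i] {          // strong hit
                    score += kStrongScore
                } else {
                    for j in 0..<5 where j != i {       // check for a weak hit
                        let targetByte = rawTarget[j]
                        if trialByte == targetByte && rawTrial[j] != targetByte {
                            score += kWeakScore
                            break
                        }
                    }
                }
            }
        }
    }
    return score
}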
0 Replies
10 Views
Please treat me as a Unity beginner. I want to learn to develop a visionOS VR app with Unity, and I've been trying to find a relatively complete learning route to start from, but Unity's official website doesn't have much material on visionOS VR apps. I hope you can suggest a complete route. Thank you!
0 Replies
10 Views
I have a customer who wants to protect the REST API of their app with a private certificate. They would then distribute the client certificate to the authorized users. Their app would not work unless the client certificate is already installed on the user's phone before they run the app. I have never done this before. Is it possible to install a client certificate on an iPhone without running an app, for example if it were sent in an email message? And if it is possible, is App Review going to let such an app into the App Store? Thanks, Frank
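One related detail: a client identity installed system-wide (for example via a configuration profile delivered by email) is generally not visible to a third-party app's keychain, so apps typically bundle or import their own identity and present it during the TLS handshake. A minimal sketch of that handshake side, assuming the app already has access to a SecIdentity (certificate plus private key):

import Foundation
import Security

// Sketch only: answer the server's client-certificate challenge with an identity
// the app controls (e.g. a bundled .p12 imported into its own keychain).
final class ClientCertificateDelegate: NSObject, URLSessionDelegate {
    private let identity: SecIdentity

    init(identity: SecIdentity) { self.identity = identity }

    func urlSession(_ session: URLSession,
                    didReceive challenge: URLAuthenticationChallenge,
                    completionHandler: @escaping (URLSession.AuthChallengeDisposition, URLCredential?) -> Void) {
        guard challenge.protectionSpace.authenticationMethod == NSURLAuthenticationMethodClientCertificate else {
            completionHandler(.performDefaultHandling, nil)
            return
        }
        let credential = URLCredential(identity: identity,
                                       certificates: nil,
                                       persistence: .forSession)
        completionHandler(.useCredential, credential)
    }
}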
0 Replies
10 Views
An error occurred. Unable to import “Apple Development: Name ()”. Error: -25294

Mac mini M1, macOS Sonoma 14.4.1
0 Replies
12 Views
I noticed that I have duplicate apps on my iPhone! I've signed up for the Public Beta program, so this might be a feature/bug in the beta.

What I first noticed was that one of my apps showed up in the Dock (I don't remember putting it there) a few days ago, and in looking around, I still have the same app in a folder on my home screen. I thought this could be a nice feature allowing me quick access to it from the Dock and from my folder, but it wasn't the app that I'd want there. I thought I might be able to move it off the Dock and back to the folder, but when I did that, the app showed up twice in the folder! I moved a different app to the Dock, and it disappeared from the folder, which is what I'd expect.

So... if I delete one of the duplicates in the folder (or from the Dock), will that delete both?

Here you can see the Alula app in both the 'Remote Control' folder and the Dock:
Here too (although the Dock is fuzzy because the folder is open):
I moved the app from the Dock back to the folder, and now there are two there:
Here's the Public Beta that I'm running:
0 Replies
12 Views
Hello, everyone. I'm using Bubble.io to develop a matching app for a transportation company, and I'm trying to publish it to the App Store using the BDK plugin. The URL of the matching app is https://carrier-s.com

I received the following message from App Store Review and I'm not sure what to do: The いいね ("Like") can be purchased in the app using payment mechanisms other than in-app purchase.

If anyone knows anything about in-app purchases, please let me know. Currently, payments are made via Stripe.
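For context on what review appears to be pointing at: digital content such as an in-app "Like" generally has to be sold through Apple's in-app purchase system rather than an external processor like Stripe (App Store Review Guideline 3.1.1). A minimal StoreKit 2 sketch, where the product identifier is a placeholder that would have to be configured in App Store Connect:

import StoreKit

// Sketch only: "com.example.like" is a placeholder product identifier.
func purchaseLike() async throws {
    guard let product = try await Product.products(for: ["com.example.like"]).first else { return }
    let result = try await product.purchase()
    if case .success(let verification) = result,
       case .verified(let transaction) = verification {
        // Grant the "Like" to the user here, then finish the transaction.
        await transaction.finish()
    }
}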
0 Replies
21 Views
Hello, I've been trying to get my organization approved, but Apple keeps asking me for documents. I have uploaded certificates of incorporation and that still doesn't work. I wish they were clearer on what type of documentation I need to provide. [Edited by Moderator]
0 Replies
17 Views
I am trying to determine the corners of a RoomPlan-detected wall using the information available in the ARView session's frame, but can't quite figure out what I'm doing wrong. The corners appear to be correct relative to each other, but the wall appears too large when I render it. (I'm also not sure I'm handling the image rotation correctly, which may be compounding my problem.) Here is the code I currently have, along with a sample image and the resulting image when I pass it through the perspective filter. It is close, but it isn't cropping the walls and floors correctly.

func captureSession(_ session: RoomCaptureSession, didChange room: CapturedRoom) {
    for surface in room.walls {
        if let frame = self.arView.session.currentFrame {
            var image: CGImage? = nil
            VTCreateCGImageFromCVPixelBuffer(frame.capturedImage, options: nil, imageOut: &image)

            let wallTransform = surface.transform
            let cameraTransform = frame.camera.transform
            let intrinsics = frame.camera.intrinsics
            let projectionMatrix = frame.camera.projectionMatrix
            let width = surface.dimensions.y
            let height = surface.dimensions.x
            let inverseCameraTransform = simd_inverse(cameraTransform)

            let wallTopRight = simd_float4(width/2, height/2, 0, 1)
            let wallTopLeft = simd_float4(-width/2, height/2, 0, 1)
            let wallBottomRight = simd_float4(width/2, -height/2, 0, 1)
            let wallBottomLeft = simd_float4(-width/2, -height/2, 0, 1)

            let worldTopRight = wallTransform * wallTopRight
            let worldTopLeft = wallTransform * wallTopLeft
            let worldBottomRight = wallTransform * wallBottomRight
            let worldBottomLeft = wallTransform * wallBottomLeft

            let cameraTopRight = projectionMatrix * inverseCameraTransform * worldTopRight
            let cameraTopLeft = projectionMatrix * inverseCameraTransform * worldTopLeft
            let cameraBottomRight = projectionMatrix * inverseCameraTransform * worldBottomRight
            let cameraBottomLeft = projectionMatrix * inverseCameraTransform * worldBottomLeft

            let imageTopRight = intrinsics * simd_float3(cameraTopRight.x / cameraTopRight.w, cameraTopRight.y / cameraTopRight.w, cameraTopRight.z / cameraTopRight.w)
            let imageTopLeft = intrinsics * simd_float3(cameraTopLeft.x / cameraTopLeft.w, cameraTopLeft.y / cameraTopLeft.w, cameraTopLeft.z / cameraTopLeft.w)
            let imageBottomRight = intrinsics * simd_float3(cameraBottomRight.x / cameraBottomRight.w, cameraBottomRight.y / cameraBottomRight.w, cameraBottomRight.z / cameraBottomRight.w)
            let imageBottomLeft = intrinsics * simd_float3(cameraBottomLeft.x / cameraBottomLeft.w, cameraBottomLeft.y / cameraBottomLeft.w, cameraBottomLeft.z / cameraBottomLeft.w)

            let topRight = CGPoint(x: CGFloat(imageTopRight.x), y: CGFloat(imageTopRight.y))
            let topLeft = CGPoint(x: CGFloat(imageTopLeft.x), y: CGFloat(imageTopLeft.y))
            let bottomRight = CGPoint(x: CGFloat(imageBottomRight.x), y: CGFloat(imageBottomRight.y))
            let bottomLeft = CGPoint(x: CGFloat(imageBottomLeft.x), y: CGFloat(imageBottomLeft.y))

            if let image {
                let filter = CIFilter.perspectiveCorrection()
                filter.inputImage = CIImage(image: UIImage(cgImage: image))
                filter.topRight = topRight
                filter.topLeft = topLeft
                filter.bottomRight = bottomRight
                filter.bottomLeft = bottomLeft

                let transformedImage = filter.outputImage
                if let transformedImage {
                    let context = CIContext()
                    if let outputImage = context.createCGImage(transformedImage, from: transformedImage.extent) {
                        let wall = Wall(id: surface.identifier, image: outputImage, surface: surface)
                        self.walls.append(wall)
                    }
                }
            }
        }
    }
}
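One thing worth double-checking in the snippet above: the world-space corners are pushed through both camera.projectionMatrix and camera.intrinsics, which looks like two different projection models applied in sequence. As a point of comparison, here is a sketch that lets ARCamera do the projection in one call; the .landscapeRight orientation for the raw capturedImage is an assumption to verify, and the variables are meant to plug into the loop above.

// Sketch: project one wall corner into capturedImage pixel coordinates with
// ARCamera.projectPoint instead of multiplying by projectionMatrix and
// intrinsics by hand.
let imageSize = CGSize(width: CVPixelBufferGetWidth(frame.capturedImage),
                       height: CVPixelBufferGetHeight(frame.capturedImage))
let worldTopLeft = wallTransform * simd_float4(-width / 2, height / 2, 0, 1)
let projectedTopLeft = frame.camera.projectPoint(
    simd_float3(worldTopLeft.x, worldTopLeft.y, worldTopLeft.z),
    orientation: .landscapeRight,   // capturedImage is delivered in sensor (landscape) orientation
    viewportSize: imageSize)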
0 Replies
42 Views
I am developing a parental-control app using the Screen Time API and Family Controls. I created two apps, one for the parent and another for the child. I want to see the child device's activity report in the parent app. This functionality works when there is only one parent/organizer. I am now trying to let multiple parents access the device activity report via the Screen Time API. I created a family group where I am the organizer (Dad), added another account as a parent (Mom), and added two child accounts. On the child's device I installed the app and authorized it for parental approval (Dad) and screen time restrictions. When using the parent app as Mom, I am unable to fetch the child device's activity report.
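For reference, a minimal sketch of the child-side authorization step described above (an illustration only; it does not address the multi-guardian limitation being asked about, and the .child role requires iOS 16 or later):

import FamilyControls

// Sketch only: request Family Controls authorization on the child's device.
// The approving guardian is whichever parent/organizer responds to the prompt.
func requestChildAuthorization() async {
    do {
        try await AuthorizationCenter.shared.requestAuthorization(for: .child)
    } catch {
        print("Family Controls authorization failed: \(error)")
    }
}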
0 Replies
25 Views
Hello, I have a question about the usage of the function func update(NEFilterSocketFlow, using: NEFilterDataVerdict, for: NETrafficDirection) (https://developer.apple.com/documentation/networkextension/nefilterdataprovider/3543400-update) provided by the NEFilterDataProvider class of the content filter network extension.

If I understand correctly, this function can be called on an instance of NEFilterDataProvider to update an already issued verdict for a network flow. By "issuing a verdict" I mean returning any of .allow()/.drop()/.init(pass:peek:) in handleNewFlow/handleInboundData/handleOutboundData.

However, I am having difficulty with it. My workflow involves maintaining an array of currently active flows. Flows are inserted in handleNewFlow(), and they are deleted when handleReport(report: NEFilterReport) is called with the flowClosed event (flows are identified by their UUID). Then, at some point in the future, based on our business logic, I iterate through the container of "active flows" and attempt to call func update(NEFilterSocketFlow, using: NEFilterDataVerdict, for: NETrafficDirection) on all of them, with the intention of changing the already issued verdict. However, calling that function seems to have no effect.

Am I using it the wrong way? What is the intended usage? Is it even possible to update the verdict of flows that were already allowed, or postponed by .init(peek:pass:)?

The issue I'm trying to solve is that we evaluate flows based on our business logic and return either .drop() or .init(pass:peek:) verdicts for them. Sometimes we want to reevaluate the .init(pass:peek:) verdict immediately, which is when we attempt to call the update() function and provide a new .init(pass:peek:) or .drop() verdict. The main objective is to promptly drop certain flows on demand, particularly those awaiting further data evaluation due to .init(pass:peek:). Thanks.
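For concreteness, here is a condensed Swift sketch of the workflow described above (the activeFlows name, peek sizes, and dropFlowsMatchingPolicy trigger are assumptions); it mirrors the question rather than demonstrating a confirmed-working call to update(_:using:for:):

import NetworkExtension

class FilterDataProvider: NEFilterDataProvider {
    // Flows that were deferred with a pass/peek verdict, keyed by their UUID.
    private var activeFlows: [UUID: NEFilterSocketFlow] = [:]

    override func handleNewFlow(_ flow: NEFilterFlow) -> NEFilterNewFlowVerdict {
        if let socketFlow = flow as? NEFilterSocketFlow {
            activeFlows[socketFlow.identifier] = socketFlow
        }
        // Defer the decision and ask to peek at some bytes in both directions.
        return .filterDataVerdict(withFilterInbound: true, peekInboundBytes: 1024,
                                  filterOutbound: true, peekOutboundBytes: 1024)
    }

    override func handle(_ report: NEFilterReport) {
        // Forget flows once the system reports them as closed.
        if report.event == .flowClosed, let id = report.flow?.identifier {
            activeFlows.removeValue(forKey: id)
        }
    }

    // Called later by business logic: try to change the verdict of deferred flows.
    func dropFlowsMatchingPolicy() {
        for (_, flow) in activeFlows {
            update(flow, using: .drop(), for: .any)
        }
    }
}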
0 Replies
12 Views
I have my own framework, which has been working fine for a long time and also compiled in Xcode 15.2. As soon as I updated to Xcode 15.3, my code no longer compiles. I am getting the error below:

module '' does not use additional module map '.framework/Modules/module.modulemap' not used when the module was built

I have both Objective-C and Swift files in my framework.
0 Replies
17 Views
Hello, I have created a Neural Network → k-Nearest Neighbors classifier with Python.

# followed by k-Nearest Neighbors for classification.
import coremltools
import coremltools.proto.FeatureTypes_pb2 as ft
from coremltools.models.nearest_neighbors import KNearestNeighborsClassifierBuilder
import copy

# Take the SqueezeNet feature extractor from the Turi Create model.
base_model = coremltools.models.MLModel("SqueezeNet.mlmodel")
base_spec = base_model._spec
layers = copy.deepcopy(base_spec.neuralNetworkClassifier.layers)

# Delete the softmax and innerProduct layers. The new last layer is
# a "flatten" layer that outputs a 1000-element vector.
del layers[-1]
del layers[-1]

preprocessing = base_spec.neuralNetworkClassifier.preprocessing

# The Turi Create model is a classifier, which is treated as a special
# model type in Core ML. But we need a general-purpose neural network.
del base_spec.neuralNetworkClassifier.layers[:]
base_spec.neuralNetwork.layers.extend(layers)

# Also copy over the image preprocessing options.
base_spec.neuralNetwork.preprocessing.extend(preprocessing)

# Remove other classifier stuff.
base_spec.description.ClearField("metadata")
base_spec.description.ClearField("predictedFeatureName")
base_spec.description.ClearField("predictedProbabilitiesName")

# Remove the old classifier outputs.
del base_spec.description.output[:]

# Add a new output for the feature vector.
output = base_spec.description.output.add()
output.name = "features"
output.type.multiArrayType.shape.append(1000)
output.type.multiArrayType.dataType = ft.ArrayFeatureType.FLOAT32

# Connect the last layer to this new output.
base_spec.neuralNetwork.layers[-1].output[0] = "features"

# Create the k-NN model.
knn_builder = KNearestNeighborsClassifierBuilder(input_name="features",
                                                 output_name="label",
                                                 number_of_dimensions=1000,
                                                 default_class_label="???",
                                                 number_of_neighbors=3,
                                                 weighting_scheme="inverse_distance",
                                                 index_type="linear")

knn_spec = knn_builder.spec
knn_spec.description.input[0].shortDescription = "Input vector"
knn_spec.description.output[0].shortDescription = "Predicted label"
knn_spec.description.output[1].shortDescription = "Probabilities for each possible label"

knn_builder.set_number_of_neighbors_with_bounds(3, allowed_range=(1, 10))

# Use the same name as in the neural network models, so that we
# can use the same code for evaluating both types of model.
knn_spec.description.predictedProbabilitiesName = "labelProbability"
knn_spec.description.output[1].name = knn_spec.description.predictedProbabilitiesName

# Put it all together into a pipeline.
pipeline_spec = coremltools.proto.Model_pb2.Model()
pipeline_spec.specificationVersion = coremltools._MINIMUM_UPDATABLE_SPEC_VERSION
pipeline_spec.isUpdatable = True

pipeline_spec.description.input.extend(base_spec.description.input[:])
pipeline_spec.description.output.extend(knn_spec.description.output[:])
pipeline_spec.description.predictedFeatureName = knn_spec.description.predictedFeatureName
pipeline_spec.description.predictedProbabilitiesName = knn_spec.description.predictedProbabilitiesName

# Add inputs for training.
pipeline_spec.description.trainingInput.extend([base_spec.description.input[0]])
pipeline_spec.description.trainingInput[0].shortDescription = "Example image"
pipeline_spec.description.trainingInput.extend([knn_spec.description.trainingInput[1]])
pipeline_spec.description.trainingInput[1].shortDescription = "True label"

pipeline_spec.pipelineClassifier.pipeline.models.add().CopyFrom(base_spec)
pipeline_spec.pipelineClassifier.pipeline.models.add().CopyFrom(knn_spec)
pipeline_spec.pipelineClassifier.pipeline.names.extend(["FeatureExtractor", "kNNClassifier"])

coremltools.utils.save_spec(pipeline_spec, "../Models/FaceDetection.mlmodel")

It is from the following tutorial: https://machinethink.net/blog/coreml-training-part3/

It works and I was able to include it in my project. I want to train the model via MLUpdateTask:

var batchInputs: [MLFeatureProvider] = []
let imageconstraint = (model.model.modelDescription.inputDescriptionsByName["image"]?.imageConstraint)
let imageOptions: [MLFeatureValue.ImageOption: Any] = [
    .cropAndScale: VNImageCropAndScaleOption.scaleFill.rawValue]
var featureProviders = [MLFeatureProvider]()

// URLs where images are stored
let trainingData = ImageManager.getImagesAndLabel()
for data in trainingData {
    let label = data.key
    for imgURL in data.value {
        let featureValue = try MLFeatureValue(imageAt: imgURL, constraint: imageconstraint!, options: imageOptions)
        if let pixelBuffer = featureValue.imageBufferValue {
            let featureProvider = FaceDetectionTrainingInput(image: pixelBuffer, label: label)
            batchInputs.append(featureProvider)
        }
    }
}
let trainingData = MLArrayBatchProvider(array: batchInputs)

When calling the MLUpdateTask as follows, the context.model from the completionHandler is null. Unfortunately there is no other information available from the compiler.

do {
    debugPrint(context)
    try context.model.write(to: ModelManager.targetURL)
} catch {
    debugPrint("Error saving the model \(error)")
}
})
updateTask.resume()

I get the following error when I want to access context.model:

Thread 5: EXC_BAD_ACCESS (code=1, address=0x0)

Can someone more experienced tell me how to fix this? It seems like I am missing some parameters? I am currently not splitting the data into train and test sets. The only preprocessing I'm doing is scaling the images down to 227x227 pixels. Thanks!
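Not a confirmed diagnosis, but for reference, a minimal sketch of how the update task is usually wired up. One thing worth checking is that MLUpdateTask(forModelAt:...) expects the URL of a compiled model (.mlmodelc), not the raw .mlmodel file; modelURL and saveURL below are placeholders (saveURL corresponds to ModelManager.targetURL in the post).

import CoreML

// Sketch only: compile the .mlmodel first, then point the update task at the
// compiled model directory.
func runUpdate(modelURL: URL, saveURL: URL, batchInputs: [MLFeatureProvider]) throws {
    let compiledURL = try MLModel.compileModel(at: modelURL)   // produces a .mlmodelc
    let updateTask = try MLUpdateTask(
        forModelAt: compiledURL,
        trainingData: MLArrayBatchProvider(array: batchInputs),
        configuration: nil,
        completionHandler: { context in
            do {
                // The updated model is only valid inside the completion handler's context.
                try context.model.write(to: saveURL)
            } catch {
                debugPrint("Error saving the model \(error)")
            }
        })
    updateTask.resume()
}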
0 Replies
15 Views
On iOS 15, when you minimize the app and then come back to it, the labels in the tab bar become truncated. When you tap on them they return to their normal state; after that you can minimize again and everything is fine. This only happens for a specific font size; in my case it is system 12 bold/regular.

Start screen:
After returning to the app:

Is there something I can do about it?
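An untested workaround sketch (purely an assumption, not a known fix): nudge the tab bar to lay its items out again when the app returns to the foreground.

import UIKit

final class BaseViewController: UIViewController {
    private var activeObserver: NSObjectProtocol?

    override func viewDidLoad() {
        super.viewDidLoad()
        activeObserver = NotificationCenter.default.addObserver(
            forName: UIApplication.didBecomeActiveNotification,
            object: nil,
            queue: .main
        ) { [weak self] _ in
            // Re-run the tab bar's layout so the item labels are re-measured.
            guard let tabBar = self?.tabBarController?.tabBar else { return }
            tabBar.setNeedsLayout()
            tabBar.layoutIfNeeded()
        }
    }

    deinit {
        if let activeObserver { NotificationCenter.default.removeObserver(activeObserver) }
    }
}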
0 Replies
16 Views
Hi! I have already run product page optimization tests in the past, but now I want to test different versions of my icon. However, I can't see the option to change the icons. How can I do that?
0 Replies
21 Views
Hey, I have an application for professionals in the medical field and their patients, and I have a question about App Store Guideline 1.2, User-Generated Content. In the application there is a one-to-one connection between a professional and a patient. I want to add comments and chat features, but I don't know if I need to do anything regarding 1.2. I don't expect any abusive content from any user, since it is a professional service, so the question is: do I need to implement all of those mechanisms to filter the content? And if I do need to implement them, is it enough to add a "Report" button in settings for reporting content?
0 Replies
28 Views
After upgrading to iOS 17, Thread Performance Checker is complaining of a priority inversion when converting a CVPixelBuffer to a UIImage through a CIImage instance. Is this a false positive, or a real issue?

- (UIImage *)imageForSampleBuffer:(CMSampleBufferRef)sampleBuffer andOrientation:(UIImageOrientation)orientation {
    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:pixelBuffer];
    UIImage *uiImage = [UIImage imageWithCIImage:ciImage];
    NSData *data = UIImageJPEGRepresentation(uiImage, 90);
}

The code snippet above, when running on a thread set to the default priority, results in the message below:

Thread Performance Checker: Thread running at User-interactive quality-of-service class waiting on a lower QoS thread running at Default quality-of-service class. Investigate ways to avoid priority inversions
PID: 1188, TID: 723209
Backtrace
=================================================================
3   AGXMetalG14              0x0000000235c77cc8 1FEF1F89-B467-37B0-86F8-E05BC8A2A629 + 2927816
4   AGXMetalG14              0x0000000235ccd784 1FEF1F89-B467-37B0-86F8-E05BC8A2A629 + 3278724
5   AGXMetalG14              0x0000000235ccf6a4 1FEF1F89-B467-37B0-86F8-E05BC8A2A629 + 3286692
6   MetalTools               0x000000022f758b68 E712D983-01AD-3FE5-AB66-E00ABF76CD7F + 568168
7   CoreImage                0x00000001a7c0e580 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 267648
8   CoreImage                0x00000001a7d0cc08 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 1309704
9   CoreImage                0x00000001a7c0e2e0 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 266976
10  CoreImage                0x00000001a7c0e1d0 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 266704
11  libdispatch.dylib        0x0000000105e4a7bc _dispatch_client_callout + 20
12  libdispatch.dylib        0x0000000105e5be24 _dispatch_lane_barrier_sync_invoke_and_complete + 176
13  CoreImage                0x00000001a7c0a784 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 251780
14  CoreImage                0x00000001a7c0a46c 3D2AC243-0880-3BA9-BBF3-A214454875E0 + 250988
15  libdispatch.dylib        0x0000000105e5b764 _dispatch_block_async_invoke2 + 148
16  libdispatch.dylib        0x0000000105e4a7bc _dispatch_client_callout + 20
17  libdispatch.dylib        0x0000000105e5266c _dispatch_lane_serial_drain + 832
18  libdispatch.dylib        0x0000000105e5343c _dispatch_lane_invoke + 460
19  libdispatch.dylib        0x0000000105e524a4 _dispatch_lane_serial_drain + 376
20  libdispatch.dylib        0x0000000105e5343c _dispatch_lane_invoke + 460
21  libdispatch.dylib        0x0000000105e60404 _dispatch_root_queue_drain_deferred_wlh + 328
22  libdispatch.dylib        0x0000000105e5fa38 _dispatch_workloop_worker_thread + 444
23  libsystem_pthread.dylib  0x00000001f35a4f20 _pthread_wqthread + 288
24  libsystem_pthread.dylib  0x00000001f35a4fc0 start_wqthread + 8
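Not a root-cause analysis, but as a comparison point: a UIImage created from a CIImage has no bitmap yet, so UIImageJPEGRepresentation appears to trigger the actual render on Core Image's internal queue (consistent with the CoreImage and libdispatch frames in the backtrace). A Swift sketch of rendering explicitly through a reusable CIContext instead (an assumption, not a verified fix for the checker warning):

import CoreImage
import CoreMedia

// Reuse one context; creating a CIContext per frame is expensive.
let sharedCIContext = CIContext()

func jpegData(from sampleBuffer: CMSampleBuffer) -> Data? {
    guard let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return nil }
    let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
    // Render and encode in one step, on whatever queue this function is called from.
    return sharedCIContext.jpegRepresentation(of: ciImage,
                                              colorSpace: CGColorSpaceCreateDeviceRGB(),
                                              options: [:])
}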