Visual Text Recognition
Work with text in images, just like you work with encoded text.
Visual text recognition helps you convert printed text in images and videos into machine-encoded text. You can input a scanned document, a photo of a document, a scene-photo (such as the text on signs and billboards), or text superimposed on an image (such as in a meme) and output the words and individual characters present in the images. VTR lets you "digitize" text so that it can be edited, searched, stored, displayed and analyzed.
Please note: The current version of our VTR model is not designed for use with handwritten text, or documents with tightly-packed text (like you might see on the page of a novel, for example).

How VTR works

VTR works by first detecting the location of text in your photos or video frames, then cropping the region where the text is present, and then finally running a specialized classification model that will extract text from the cropped image. To accomplish these different tasks, you will need to configure a workflow. You will then add these three models to your workflow:
  • Visual Text Detection
  • 1.0 Cropper
  • Visual Text Recognition

Building a VTR workflow

gRPC Python
gRPC Java
gRPC NodeJS
cURL
1
# Insert here the initialization code as outlined on this page:
2
# https://docs.clarifai.com/api-guide/api-overview/api-clients#client-installation-instructions
3
4
post_workflows_response = stub.PostWorkflows(
5
service_pb2.PostWorkflowsRequest(
6
user_app_id=userDataObject, # The userDataObject is created in the overview and is required when using a PAT
7
workflows=[
8
resources_pb2.Workflow(
9
id="visual-text-recognition-id",
10
nodes=[
11
resources_pb2.WorkflowNode(
12
id="detect-concept",
13
model=resources_pb2.Model(
14
id="2419e2eae04d04f820e5cf3aba42d0c7",
15
model_version=resources_pb2.ModelVersion(
16
id="75a5b92a0dec436a891b5ad224ac9170"
17
)
18
)
19
),
20
resources_pb2.WorkflowNode(
21
id="image-crop",
22
model=resources_pb2.Model(
23
id="ce3f5832af7a4e56ae310d696cbbefd8",
24
model_version=resources_pb2.ModelVersion(
25
id="a78efb13f7774433aa2fd4864f41f0e6"
26
)
27
),
28
node_inputs=[
29
resources_pb2.NodeInput(node_id="detect-concept")
30
]
31
),
32
resources_pb2.WorkflowNode(
33
id="image-to-text",
34
model=resources_pb2.Model(
35
id="9fe78b4150a52794f86f237770141b33",
36
model_version=resources_pb2.ModelVersion(
37
id="d94413e582f341f68884cac72dbd2c7b"
38
)
39
),
40
node_inputs=[
41
resources_pb2.NodeInput(node_id="image-crop")
42
]
43
),
44
]
45
)
46
]
47
),
48
metadata=metadata
49
)
50
51
if post_workflows_response.status.code != status_code_pb2.SUCCESS:
52
raise Exception("Post workflows failed, status: " + post_workflows_response.status.description)
Copied!
1
import com.clarifai.grpc.api.*;
2
import com.clarifai.grpc.api.status.*;
3
4
// Insert here the initialization code as outlined on this page:
5
// https://docs.clarifai.com/api-guide/api-overview/api-clients#client-installation-instructions
6
7
MultiWorkflowResponse postWorkflowsResponse = stub.postWorkflows(
8
PostWorkflowsRequest.newBuilder()
9
.setUserAppId(UserAppIDSet.newBuilder().setAppId("{YOUR_APP_ID}"))
10
.addWorkflows(
11
Workflow.newBuilder()
12
.setId("visual-text-recognition-id")
13
.addNodes(
14
WorkflowNode.newBuilder()
15
.setId("detect-concept")
16
.setModel(
17
Model.newBuilder()
18
.setId("2419e2eae04d04f820e5cf3aba42d0c7")
19
.setModelVersion(
20
ModelVersion.newBuilder()
21
.setId("75a5b92a0dec436a891b5ad224ac9170")
22
)
23
)
24
)
25
.addNodes(
26
WorkflowNode.newBuilder()
27
.setId("image-crop")
28
.setModel(
29
Model.newBuilder()
30
.setId("ce3f5832af7a4e56ae310d696cbbefd8")
31
.setModelVersion(
32
ModelVersion.newBuilder()
33
.setId("a78efb13f7774433aa2fd4864f41f0e6")
34
)
35
)
36
.addNodeInputs(NodeInput.newBuilder().setNodeId("detect-concept"))
37
)
38
.addNodes(
39
WorkflowNode.newBuilder()
40
.setId("image-to-text")
41
.setModel(
42
Model.newBuilder()
43
.setId("9fe78b4150a52794f86f237770141b33")
44
.setModelVersion(
45
ModelVersion.newBuilder()
46
.setId("d94413e582f341f68884cac72dbd2c7b")
47
)
48
)
49
.addNodeInputs(NodeInput.newBuilder().setNodeId("image-crop"))
50
)
51
)
52
.build()
53
);
54
55
if (postWorkflowsResponse.getStatus().getCode() != StatusCode.SUCCESS) {
56
throw new RuntimeException("Post workflows failed, status: " + postWorkflowsResponse.getStatus());
57
}
Copied!
1
// Insert here the initialization code as outlined on this page:
2
// https://docs.clarifai.com/api-guide/api-overview/api-clients#client-installation-instructions
3
4
stub.PostWorkflows(
5
{
6
user_app_id: {
7
app_id: "e83440590d104cee97ef84af1856837d"
8
},
9
workflows: [
10
{
11
id: "visual-text-recognition-id",
12
nodes: [
13
{
14
id: "detect-concept",
15
model: {
16
id: "2419e2eae04d04f820e5cf3aba42d0c7",
17
model_version: {
18
id: "75a5b92a0dec436a891b5ad224ac9170"
19
}
20
}
21
},
22
{
23
id: "image-crop",
24
model: {
25
id: "ce3f5832af7a4e56ae310d696cbbefd8",
26
model_version: {
27
id: "a78efb13f7774433aa2fd4864f41f0e6"
28
}
29
},
30
node_inputs: [
31
{node_id: "detect-concept"}
32
]
33
},
34
{
35
id: "image-to-text",
36
model: {
37
id: "9fe78b4150a52794f86f237770141b33",
38
model_version: {
39
id: "d94413e582f341f68884cac72dbd2c7b"
40
}
41
},
42
node_inputs: [
43
{node_id: "image-crop"}
44
]
45
},
46
]
47
}
48
]
49
},
50
metadata,
51
(err, response) => {
52
if (err) {
53
throw new Error(err);
54
}
55
56
if (response.status.code !== 10000) {
57
console.log(response.status);
58
throw new Error("Post workflows failed, status: " + response.status.description);
59
}
60
}
61
);
Copied!
1
curl -X POST 'https://api.clarifai.com/v2/users/me/apps/{{app}}/workflows' \
2
-H 'Authorization: Key {{PAT}}' \
3
-H 'Content-Type: application/json' \
4
--data-raw '{
5
"workflows": [
6
{
7
"id": "visual-text-recognition-id",
8
"nodes": [
9
{
10
"id": "detect-concept",
11
"model": {
12
"id": "2419e2eae04d04f820e5cf3aba42d0c7",
13
"model_version": {
14
"id": "75a5b92a0dec436a891b5ad224ac9170"
15
}
16
}
17
},
18
{
19
"id": "image-crop",
20
"model": {
21
"id": "ce3f5832af7a4e56ae310d696cbbefd8",
22
"model_version": {
23
"id": "a78efb13f7774433aa2fd4864f41f0e6"
24
}
25
},
26
"node_inputs": [
27
{
28
"node_id": "general-concept"
29
}
30
]
31
},
32
{
33
"id": "image-to-text",
34
"model": {
35
"id": "9fe78b4150a52794f86f237770141b33",
36
"model_version": {
37
"id": "d94413e582f341f68884cac72dbd2c7b"
38
}
39
},
40
"node_inputs": [
41
{
42
"node_id": "image-crop"
43
}
44
]
45
}
46
]
47
}
48
]
49
}'
Copied!
Last modified 1mo ago
Copy link
Edit on GitHub