This is an example of a technique shown in the Vue manual: you maintain two pieces of state, using one as the backing store and a watcher to update the derived state via a tweening mechanism — TweenLite in this case. Shown with a few hacks that are necessary to get it working with the d3-cluster node representation.

<template>
  <div>
    <svg viewBox="0 0 1024 768" width="1024" height="768" 
         xmlns="http://www.w3.org/2000/svg">
      <circle v-for="descendant in tweenedDescendants"
              :cx="descendant.x"
              :cy="descendant.y"
              r="10"
              stroke="red" fill="grey"/>
    </svg>

    <button v-on:click="relayout">Relayout</button>
  </div>
</template>

<script lang="ts">
import Vue from 'vue';
import {hierarchy, cluster, HierarchyNode} from 'd3';
import _ from 'lodash';
import {TweenLite} from 'gsap';

const familyData = {
    "name": "Eve",
    "children": [
        {
            "name": "Cain"
        },
        {
            "name": "Seth",
            "children": [
                {
                    "name": "Enos"
                },
                {
                    "name": "Noam"
                }
            ]
        },
        {
            "name": "Abel"
        },
        {
            "name": "Awan",
            "children": [
                {
                    "name": "Enoch"
                }
            ]
        },
        {
            "name": "Azura"
        }
    ]
};


// Marker type for the raw family-tree datum.  Left empty because this demo
// only reads the d3-computed layout fields (x, y), never the datum fields.
// (Removed the stray semicolon after the declaration — interfaces are not
// statements and the trailing ';' trips no-extra-semi lint rules.)
interface FamilyDatum {
}

function initialCluster(val: any): HierarchyNode<FamilyDatum> {
    const theHierarchy = hierarchy(val);
    const clusterLayout = cluster();
    clusterLayout.size([1024, 768]);
    clusterLayout(theHierarchy);
    return theHierarchy;
}

export default Vue.extend({
    data() {

        return {
            // Source of truth: replaced wholesale on every relayout().
            hierarchyRoot: initialCluster(familyData) as HierarchyNode<FamilyDatum>,
            // Derived copy whose x/y values are tweened toward hierarchyRoot's;
            // the template renders from this one so motion appears smooth.
            tweenedHierarchyRoot: initialCluster(familyData) as HierarchyNode<FamilyDatum>
        };
    },
    mounted() {
        // Re-randomize the layout every second so the tween is visible.
        // NOTE(review): the interval is never cleared; fine for a demo, but it
        // leaks if the component is ever destroyed.
        window.setInterval(this.relayout, 1000);
    },
    methods: {
        relayout() {
            const clusterLayout = cluster();

            // these hacks necessary because cluster() itself does some array
            // mutations that fool the reactivity
            const clonedHierarchy = _.clone(this.hierarchyRoot);
            clusterLayout.size([Math.random() * 1024, Math.random() * 768]);
            clusterLayout(clonedHierarchy);

            // Reassigning (rather than mutating) hierarchyRoot is what fires
            // the watcher below.
            this.hierarchyRoot = clonedHierarchy;
        },
    },
    computed: {
        descendants(): HierarchyNode<FamilyDatum>[] {
            return this.hierarchyRoot.descendants();
        },
        // The template's v-for iterates this list; each node's x/y is animated
        // in place by TweenLite from the hierarchyRoot watcher.
        tweenedDescendants(): HierarchyNode<FamilyDatum>[] {
            return this.tweenedHierarchyRoot.descendants();
        },
    },
    watch: {
        // No deep watch needed because we always reassign the whole
        // array
        hierarchyRoot(val, oldVal) {
            console.log("inside handler function");
            // We know that descendants() function returns references to
            // the nodes that can be directly modified by mutation.
            // Because the x and y values have already been declared in the
            // data() method  -- that is, cluster() was already called once
            // to add the properties -- Vue knows that it should react to changes
            // on these properties.

            const targetDescendants = val.descendants();
            const tweenedDescendants = this.tweenedHierarchyRoot.descendants();

            // NOTE(review): this pairs nodes by array index, which assumes both
            // trees have identical structure — true here because both were
            // built from the same familyData and only coordinates change.
            for (var i = 0; i < tweenedDescendants.length; i++) {
                const node = tweenedDescendants[i];

                const targetX = targetDescendants[i].x;
                const targetY = targetDescendants[i].y;

                // Mutates node.x / node.y over 0.5s; Vue reacts to each frame.
                TweenLite.to(node, 0.5, {x: targetX, y: targetY});
            }
        }
    }
});
</script>

<style lang="less">
</style>
Posted 2019-01-06

Here is a simple component for Vue/TS that demonstrates using the transition-group directive to collapse the width of elements when they are removed from a list.

One catch is that an unlabelled 'div' is inserted as the parent of all list items. This can cause problems when styling things using flexbox, which treats only its direct children as layout items. Also, if you have styling applied to the children, applying styling to the transition classes won't override those styles, because the transition style may be less specific than the styles on the child elements.

<template>
  <div class="home">

    <div class="container">
      <transition-group name="mytrans" tag="div">
        <div v-for="(box, index) in boxes"
             v-if="box.isVisible"
             :key="box.id"
             class="box">
          {{box.id}}
          <button v-on:click="hide(index)">Hide</button>
        </div>
      </transition-group>
    </div>
  </div>
</template>

<script lang="ts">
import Vue from 'vue';
import _ from 'lodash';

interface Box {
    isVisible: boolean;
    id: number;
}


// Demo component: five boxes that can be hidden one at a time; the
// transition-group in the template animates each removal.
export default Vue.extend({
    name: 'home',
    components: {},
    data() {
        const boxes: Box[] = [];
        return { boxes };
    },
    created() {
        // Seed the list with five visible boxes, ids 0 through 4.
        let id = 0;
        while (id < 5) {
            this.boxes.push({ isVisible: true, id: id });
            id++;
        }
    },
    methods: {
        // Mark the box at `index` hidden, triggering its leave transition.
        hide(index: number): void {
            const box = this.boxes[index];
            box.isVisible = false;
        }
    }
});
</script>

<style>
.container {
    height: 200px;
    background-color: blue;
}

/* Because vue transition creates a wrapper div around the transition'ed elements,
   we need to style that div to layout the elements correctly. */
.container > div {
    display: flex;
    flex-direction: row;
}

.box {
    height: 150px;
    width: 150px;
    margin: 16px;
    background-color: green;
}


.mytrans-leave-active {
    transition: all 0.5s ease-in;
}

.mytrans-leave-to {
    width: 0px;
}
</style>
Posted 2018-12-18

Setup procedure

Install git, this provides the git-http-backend. yum install git

Install httpd. yum install httpd

SetEnv GIT_PROJECT_ROOT /srv/git
SetEnv GIT_HTTP_EXPORT_ALL
ScriptAlias /git/ /usr/libexec/git-core/git-http-backend/

<Files "git-http-backend">
    AuthType Basic
    AuthName "Git Access"
    AuthUserFile /srv/git/.htpasswd
    Require expr !(%{QUERY_STRING} -strmatch '*service=git-receive-pack*' || %{REQUEST_URI} =~ m#/git-receive-pack$#)
    Require valid-user
</Files>

Create the git root and configure the users

mkdir /srv/git
htpasswd -c /srv/git/.htpasswd db57

Test procedure

On the server:

cd /srv/git
mkdir my-test-repository
cd my-test-repository
git init --bare
chown -R apache:apache /srv/git

On a client:

mkdir my-test-repository
cd my-test-repository
git init
echo "test" > README.md
git add -A
git commit -m 'initial import'
git remote add origin http://localhost/git/my-test-repository
git push -u origin master

As far as I know, all repositories need to be created server-side before you are allowed to push to them.

From outside

Where the IP of your server is 10.179.127.226,

git clone http://10.179.127.226/git/my-test-repository

Cloning is unauthenticated, only push is authenticated.

Posted 2018-11-29

An extremely minimal way to publish your Vue component. You already have your project. I assume and hope you are using vue-cli 3.0. If not, immediately switch to it, vue create mypackage and port your project into the new structure.

To get the build for your project, you use the special lib build target to the vue-cli-service build subcommand. You can invoke this as such:

amoe@cslp019129 $ ./node_modules/.bin/vue-cli-service build --target lib --name amoe-butterworth-widgets src/components/TaxonSelect.vue

Here, amoe-butterworth-widgets is the name of the library that you intend to publish. In this case, I'm publishing it as an unscoped public package; this is just the regular form of npm publishing that you all know and (hah) love.

TaxonSelect.vue will be exposed as the default export of the build module.

The build will produce several files under the dist subdirectory. You are looking for the UMD build. You'll find a file dist/amoe-butterworth-widgets.umd.js. Now you need to add a key to package.json.

{
...
    "name": "amoe-butterworth-widgets",
    "main": "dist/amoe-butterworth-widgets.umd.js",
    "version": "0.1.0",
    "license": "MIT",
...
}

It's wise to set a license and to obey semver as appropriate.

Now you need to be logged in before you can actually publish. Run npm adduser.

Once you've done this, simply run npm publish. Your package will be pushed up and made available at http://npm.js.com/package/mypackage, where mypackage was specified in the name field of package.json.

When someone runs npm install mypackage, they'll get what is more-or-less a copy of your source tree. As far as I can see, npm doesn't attempt to clean your tree or reproduce your build in any way. So make sure that anything you don't want to be public is scrubbed before running npm publish.

When the user wants to actually use your component, TaxonSelect.vue is the default export, as mentioned above. So to use it, they just type import TaxonSelect from 'mypackage', and TaxonSelect is then a component that they can register in their components object. There are ways to export multiple components from a module, but that's outside the scope of this article.

Posted 2018-11-21

Suppose you want to encode the Australian flag, you may consider this to be one simple emoji character. Actually you're in for a surprise, perhaps, because emojis aren't always represented as a single character, many emojis are combinations of multiple Unicode code points.

For instance, the Australian flag may be represented as a sequence of two code points from outside the Basic Multilingual Plane (each written with eight hex digits below).

U+0001F1E6
U+0001F1FA

This happens to display as a single glyph, the Australian flag, on some platforms, but may also display as two separate glyphs.

However, some ways of representing text only support direct escaping of code points in the Basic Multilingual Plane, those in the range U+0000 to U+FFFF. JSON is one of these. When we attempt to escape these supplementary characters (which, note, do not need to be escaped under the JSON spec), we get a result that looks like two different codepoints. Quoth RFC 7159:

To escape an extended character that is not in the Basic Multilingual
Plane, the character is represented as a 12-character sequence,
encoding the UTF-16 surrogate pair.  So, for example, a string
containing only the G clef character (U+1D11E) may be represented as
"\uD834\uDD1E".

So in fact, the Australian flag may be concretely represented in escaped JSON as this string:

"\ud83c\udde6\ud83c\uddfa"

As you can see, the last two hex digits of each escape pair (e6, fa) match the last two hex digits of the corresponding code points above.

Posted 2018-09-14

I've been holding off a bit on the food posts of late, because when I enter the kitchen these days it's often in a flurry of inspiration and I don't have the wherewithal to fetch the camera. I have been continuing to document, though. I've been repeating several recipes multiple times in different variations, particularly the cheesesteak which I've probably made about 4 or 5 times by now. I've tried it with various cuts, from the prohibitively expensive, authentic but delicious rib eye, the wonderful and reasonably priced rump steak, and the difficult-to-find skirt steak. I've also been experimenting with using beef seasoning, particularly the Rajah brand, which deepens the flavour immensely. It actually deepens the flavour so much I wonder if it might be too much, if it's disguising the flavour of the cut itself. But I can't be sure without more testing.

The biggest discovery that I made was to actually cook the steak properly as steak rather than the sauted-beef method that's popular and probably more authentic. This means the standard steak cooking method of browning in a hot pan. I cook it until medium rare. You can feel free to do it as rare as you like, because you can easily cook it more at the saute stage. Then remove from the pan and deglaze, reserving the fond. You let the steak cool and slice it into whatever consistency you like. I do chunky pieces. Then cook the rest of the recipe with a covered pan and lastly add in the steak chunks with the various juices. You'll have incredibly juicy cheesesteak mix and chunks that yield to the teeth.

I've found that toasting the ciabatta lightly in the grill before putting in the filling enhances the comfort-feeling of the sandwich. It's controversial but I've started adding cold mayonnaise after the steak mixture. I think this adds a lovely sharpness which offsets the giant umami hit from the brown stuff. This does require a bit more care in adding the cheese, though, because you have to ensure the cheese is melted properly before adding it to the sandwich, so you need to somehow melt the cheese onto the steak mix before. I'm thinking of even trying a lemony mayonnaise? Sounds crazy but perhaps it could work. I tried gherkins as a condiment and found them rather disappointing, I really expected the pickle flavour to work well but for some reason it didn't. Jalapenos work better than gherkins, although I'm not sure I'd consider this a spicy sandwich.

I've also had some thoughts on soto ayam, having re-cooked it recently with excellent results. Thigh works better than breast for this in my view, although I'd certainly remove the skin next time. Two teaspoons of sambal ulek is enough to cover about one bowl when eating with rice. I found out that I really don't like the Maggi Malaysian sauce and need to stop eating it. However, the notorious Maggi liquid seasoning does go very well with reduced-rice that's destined for a South Asian dish, such as rice that you might drown in a big bowl of soto. I can't tell what it is with these dishes but they just go down with such aplomb, I feel like I could eat bowls and bowls of them without ever being sated. This is a characteristic of both Owen's nasi goreng and Owen's soto ayam, though curiously not the pangek ikan. I think the latter may be because the fat content from the coconut milk tends to fill you up. The only common characteristic between those two dishes that I can see is that they're both lean and pungent, getting their kick from a vinegary sambal.

I got some experience in braising from attempting to cook a Mexican short rib in adobo having been inspired by the menu at a local restaurant. But although I succeeded in making edible food twice, both attempts were almost but not entirely unlike the restaurant version, and both were utterly different from each other to boot. The first was smoky, deep, but slightly bitter, having got burned from excessively high oven heat and having the sauce scraped and melded with it: I tempered it with some muscovado sugar, but it stayed questionable. The second was the opposite: done on a low heat for an extremely long time, it fell off the bone more satisfactorily than the first one, but doesn't seem to have absorbed so much flavour in comparison to the first one. Perhaps the truth lies in the middle. Who knew slow cooking could be so difficult?

Posted 2018-08-29

Goal: perform a basic query of our database through an autogenerated GraphQL backend.

For this task we use Postgraphile, a tool modelled on Postgrest that generates an API server based on introspecting the database schema.

We will also use the Apollo GraphQL client and the vue-apollo integration.
These seem to be the most widely used libraries.

Connecting to GraphQL

In your main entry point, you can use this setup code:

import { ApolloClient } from 'apollo-client';
import { HttpLink } from 'apollo-link-http';
import { InMemoryCache } from 'apollo-cache-inmemory';
import VueApollo from 'vue-apollo';

const localApi = "/api";

const client = new ApolloClient({
    link: new HttpLink({ uri: localApi }),
    cache: new InMemoryCache()
});

const apolloProvider = new VueApollo({
    defaultClient: client
});

Vue.use(VueApollo);

document.addEventListener("DOMContentLoaded", e => {
    const vueInstance = new Vue({
        render: h => h(ApplicationRoot),
        provide: apolloProvider.provide()
    });
    vueInstance.$mount('#vue-outlet');
});

Your API will be available under the "/api" prefix to avoid CORS issues. You configure this in webpack with the following stanza:

devServer: {
    port: 57242,
    proxy: {
        "/api": {
            target: "http://localhost:5000/graphql",
            pathRewrite: {"^/api": ""}
        }
    }
}

The Postgraphile server will appear on port 5000 by default.

Starting the server

This is extremely simple.

amoe@klauwier $ sudo yarn global add postgraphile
amoe@klauwier $ postgraphile -c postgres://localhost/mol_viewer

PostGraphile server listening on port 5000 🚀

  ‣ Connected to Postgres instance postgres://localhost:5432/mol_viewer
  ‣ Introspected Postgres schema(s) public
  ‣ GraphQL endpoint served at http://localhost:5000/graphql
  ‣ GraphiQL endpoint served at http://localhost:5000/graphiql

* * *

Your database here is mol_viewer. A service should start on port 5000.

Writing a sample query

Using the module graphql-tag, you can define your queries using template literals. This will syntax-check your queries at compile time.

const demoQuery = gql`
{
  allParticipants {
    nodes {
     reference
    }
  }
}`

This looks slightly weird, but the important thing to know here is that I have a table already in my database schema called participant. Postgraphile has inferred a collection of objects, therefore, called allParticipants. My table has a field called reference (which can be of any type). nodes is (I believe) a Postgraphile-specific property of the list allParticipants. That is to say, the text nodes above has nothing to do with the database schema of mol_viewer, rather it's an artifact of using Postgraphile.

Link the query to your page

When you have set up vue-apollo, you have access to a apollo property on your Vue instance. In this case it looks as such.

apollo: {
    allParticipants: demoQuery
}

Here, allParticipants is a name which must be the same as the top-level result of the query. It's not just an arbitrary identifier.

Now, allParticipants field exists in your component's data object. The query is automatically made when you load your page, and allParticipants will be populated. You can demonstrate this through the simple addition of {{allParticipants}} to your template.

To iterate through the query results, you have to consider that the results of the query are always shaped like the query itself. So your basic list of the results will look as follows.

<ol>
  <li v-for="node in allParticipants.nodes">
    {{node.reference}}
  </li>
</ol>

Filtering

Postgraphile supports filtering using the simple condition on the auto-generated allParticipants field. For instance, imagine filtering by a given reference. You'd do allParticipants(condition: {reference: "A14"}). Only simple equality is supported out of the box (!). Plugins are available.

Posted 2018-07-20

This is a bit tricky because multiple ways of doing it are documented. This is the way that eventually worked for me.

The top-level SConstruct is as normal for an out-of-source build, it reads

SConscript('src/SConscript', variant_dir='build')

You need a header so that your program can recognize the version number. In C++ this is as follows, in src/version.hh:

extern char const* const version_string;

You can define the version that you want to update in a file named version which is in the root of the repository. It should have no other content other than the version number, perhaps along with a newline.

0.0.1

Now the src/SConscript file should look like this:

env = Environment()

# The version file is located in the file called 'version' in the very root
# of the repository.
VERSION_FILE_PATH = '#version'

# Note: You absolutely need to have the #include below, or you're going to get
# an 'undefined reference' message due to the use of const.  (it's the second
# const in the type declaration that causes this.)
#
# Both the user of the version and this template itself need to include the
# extern declaration first.

def version_action(target, source, env):
    """SCons action: generate version.cc from the plaintext version file.

    target -- one-element list of SCons nodes; target[0] is version.cc
    source -- one-element list of SCons nodes; source[0] is the version file
    env    -- the construction environment (unused here)

    Returns 0 so SCons considers the build step successful.
    """
    version_path = source[0].path
    output_path = target[0].path

    # The version file holds just the version number, maybe with a newline.
    with open(version_path, 'r') as infile:
        version_number = infile.read().rstrip()

    # The #include is mandatory; see the note above about 'undefined
    # reference' errors caused by const linkage.
    generated_source = """
    #include "version.hh"

    const char* const version_string = "%s";
    """ % version_number

    with open(output_path, 'w') as outfile:
        outfile.write(generated_source)

    return 0

env.Command(
    target='version.cc',
    source=VERSION_FILE_PATH,
    action=version_action
)

main_binary = env.Program(
    'main', source=['main.cc', 'version.cc']
)

The basic strategy here is to designate the version file as the source file for version.cc, but we just hardcode the template for the actual C++ definition inside the SConscript itself. Note that the include within the template is crucial, due to an 'aspect' of the C++ compilation process.

Posted 2018-06-08

This is really tricky. There are several hurdles you face.

First hurdle: importing the Leaflet CSS files from your node_modules folder and incorporating this into your Webpack build.

The canonical form for this is as follows:

@import url("~leaflet/dist/leaflet.css");

The tilde is a documented but obscure shortcut for a vendored module found under node_modules. There's no way to avoid hardcoding the path dist/leaflet.css.

Once you've done this, you'll have a non-broken map view, but you still won't be able to view marker images. You'll be seeing that the CSS attempts to load images but isn't able to load them. Then you'll try to apply file-loader, but due to a similar issue to one described on React, you'll note that file-loader or url-loader generate broken paths with strange hashing symbols in them.

Luckily, there's a fix for this! You'll notice this solution in the thread, from user PThomir:

import L from 'leaflet';

L.Icon.Default.imagePath = '.';
// OR
delete L.Icon.Default.prototype._getIconUrl;

L.Icon.Default.mergeOptions({
  iconRetinaUrl: require('leaflet/dist/images/marker-icon-2x.png'),
  iconUrl: require('leaflet/dist/images/marker-icon.png'),
  shadowUrl: require('leaflet/dist/images/marker-shadow.png'),
});

This is now getting very close. However, you'll try to adapt this, using import instead of require, because TypeScript doesn't know about require.

You'll get examples like this:

Cannot find module 'leaflet/dist/images/marker-icon-2x.png'

But you'll look for the file and it'll clearly be there. Puzzling. Until you realize you've missed a key point: Webpack's require and TypeScript's import are completely different animals. More specifically: Only Webpack's require knows about Webpack's loaders. So when you might try to import the PNG,

import iconRetinaUrl from 'leaflet/dist/images/marker-icon-2x.png';

This is actually intercepted by the TypeScript compiler and causes a compile error. We need to find some way to use Webpack's require from typescript. Luckily this isn't too difficult. You need to create a type signature for this call as such.

// This is required to use Webpack loaders, cf https://stackoverflow.com/a/36151803/257169
// The parameter must be named *and* typed: writing `(string)` declares a
// parameter literally named "string" with an implicit `any` type, which is
// rejected under noImplicitAny and doesn't express the intended contract.
declare function require(path: string): any;

Put this somewhere in your search path for modules, as webpack-require.d.ts. Remember you don't explicitly import .d.ts file. So now just use require in your entry.ts file as before.

My eventual snippet looked as follows:

const leaflet = require('leaflet');

delete leaflet.Icon.Default.prototype._getIconUrl;

const iconRetinaUrl = require('leaflet/dist/images/marker-icon-2x.png');
const iconUrl = require('leaflet/dist/images/marker-icon.png');
const shadowUrl = require('leaflet/dist/images/marker-shadow.png');

leaflet.Icon.Default.mergeOptions({ iconRetinaUrl, iconUrl, shadowUrl })

But remember, none of this will work without that .d.ts file, otherwise tsc is just going to wonder what the hell you mean by require.

Posted 2018-05-22

The basic question is, how do we read an entire graph from a Neo4j store into a NetworkX graph? And another question is, how do we extract subgraphs from Cypher and recreate them in NetworkX, to potentially save memory?

Using a naive query to read all relationships

This is based on cypher-ipython module. This uses a simple query like the following to obtain all the data:

MATCH (n) OPTIONAL MATCH (n)-[r]->() RETURN n, r

This can be read into a graph using the following code. Note that the rows may duplicate both relationships and nodes, but this is taken care of by the use of neo4j IDs.

def rs2graph(rs):
    """Convert a Neo4j result set of (node, relationship) rows into a
    networkx.MultiDiGraph.

    Rows may repeat both nodes and relationships; duplicates are harmless
    because nodes are keyed by their Neo4j id and edges by
    (start, end, type).
    """
    graph = networkx.MultiDiGraph()

    for record in rs:
        node = record['n']
        if node:
            print("adding node")
            attrs = dict(node.properties)
            attrs['labels'] = node.labels
            graph.add_node(node.id, **attrs)

        rel = record['r']
        # Must test 'is not None', not truthiness, because relationships
        # hash/compare on value.
        if rel is not None:
            print("adding edge")
            graph.add_edge(rel.start, rel.end, key=rel.type,
                           **rel.properties)

    return graph

There's something about this query that is rather inelegant, that is that the result set is essentially 'denormalized'.

Using aggregation functions

Luckily there's another more SQL-ish way to do it, which is to COLLECT the relationships of each node into an array. This then returns lists which represent a distinct node and the complete set of relationships for that node, similar to something like the ARRAY_AGG() and GROUP BY combination in PostgreSQL. This seems much cleaner to me.

def rs2graph_v2(rs):
    """Build a MultiDiGraph from rows of (node, collected relationships).

    Each record must carry a node under 'n2' and the complete list of that
    node's relationships under 'rels' (produced by a Cypher COLLECT).
    Unlike rs2graph, every row must contain a node.  Dangling references
    (edges whose start node lies outside the result set) are NOT handled
    here -- see rs2graph_v3.
    """
    graph = networkx.MultiDiGraph()

    for record in rs:
        node = record['n2']
        if not node:
            raise Exception('every row should have a node')

        print("adding node")
        attrs = dict(node.properties)
        attrs['labels'] = list(node.labels)
        graph.add_node(node.id, **attrs)

        for rel in record['rels']:
            print("adding edge")
            graph.add_edge(rel.start, rel.end, key=rel.type,
                           **rel.properties)

    return graph

Trying to extend to handle subgraphs

When we have relationship types that define subtrees, which are labelled something like :PRECEDES in this case, we can attempt to materialize this sub-graph selected from a given root in memory. In the query below, the Token node with content nonesuch is taken as the root.

This version can be used with a Cypher query like the following:

MATCH (a:Token {content: "nonesuch"})-[:PRECEDES*]->(t:Token)
WITH COLLECT(a) + COLLECT(DISTINCT t) AS nodes_
UNWIND nodes_ AS n
OPTIONAL MATCH p = (n)-[r]-()
WITH n AS n2, COLLECT(DISTINCT RELATIONSHIPS(p)) AS nestedrel
RETURN n2, REDUCE(output = [], rel in nestedrel | output + rel) AS rels

And the Python code to read the result of this query is as such:

def rs2graph_v3(rs):
    """Like rs2graph_v2, but tolerant of edges whose start node falls
    outside the returned subgraph.

    The entire result set is materialized up front so that every node id
    can be collected first; edges starting at an unknown id are then
    skipped rather than silently creating phantom nodes.  This costs
    memory proportional to the result set size, which may be a problem
    for large results.
    """
    graph = networkx.MultiDiGraph()

    records = list(rs)
    known_ids = set([record['n2'].id for record in records])

    for record in records:
        node = record['n2']
        if not node:
            raise Exception('every row should have a node')

        print("adding node")
        attrs = dict(node.properties)
        attrs['labels'] = list(node.labels)
        graph.add_node(node.id, **attrs)

        for rel in record['rels']:
            print("adding edge")

            # Asking for all relationships on a node can surface one that
            # PRECEDES the current node -- i.e. whose start lies outside
            # the subgraph selected by the query.  Skip those.
            if rel.start in known_ids:
                graph.add_edge(rel.start, rel.end, key=rel.type,
                               **rel.properties)
            else:
                print("ignoring dangling relationship [no need to worry]")

    return graph
Posted 2018-05-09

This blog is powered by coffee and ikiwiki.